diff --git a/.gitignore b/.gitignore index ff12bb7fa..3d227ec5e 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,4 @@ nosetests.xml # pycharm project files .idea +/Brian2.egg-info diff --git a/brian2/codegen/codeobject.py b/brian2/codegen/codeobject.py index cda345a9c..241b8ec80 100644 --- a/brian2/codegen/codeobject.py +++ b/brian2/codegen/codeobject.py @@ -1,4 +1,5 @@ import functools +import weakref from brian2.core.variables import ArrayVariable from brian2.core.functions import Function @@ -21,12 +22,7 @@ def prepare_namespace(namespace, variables, codeobj_class): # We do the import here to avoid import problems from .runtime.numpy_rt.numpy_rt import NumpyCodeObject - namespace = dict(namespace) - # Add variables referring to the arrays - arrays = [] - for value in variables.itervalues(): - if isinstance(value, ArrayVariable): - arrays.append((value.arrayname, value.get_value())) + # Check that all functions are available for name, value in namespace.iteritems(): if isinstance(value, Function): @@ -39,12 +35,11 @@ def prepare_namespace(namespace, variables, codeobj_class): else: raise NotImplementedError(('Cannot use function ' '%s: %s') % (name, ex)) - namespace.update(arrays) return namespace -def create_codeobject(name, abstract_code, namespace, variables, template_name, +def create_codeobject(owner, name, abstract_code, namespace, variables, template_name, indices, variable_indices, codeobj_class, template_kwds=None): ''' @@ -97,10 +92,12 @@ def create_codeobject(name, abstract_code, namespace, variables, template_name, variables.update(indices) - code = template(snippet, variables=variables, codeobj_name=name, namespace=namespace, **template_kwds) + code = template(snippet, + owner=owner, variables=variables, codeobj_name=name, namespace=namespace, + **template_kwds) logger.debug(name + " code:\n" + str(code)) - codeobj = codeobj_class(code, namespace, variables, name=name) + codeobj = codeobj_class(owner, code, namespace, variables, name=name) codeobj.compile() return codeobj @@ -124,8 +121,13 @@ class CodeObject(Nameable): #: A short name for this type of `CodeObject` class_name = None - def __init__(self, code, namespace, variables, name='codeobject*'): + def __init__(self, owner, code, namespace, variables, name='codeobject*'): Nameable.__init__(self, name=name) + try: + owner = weakref.proxy(owner) + except TypeError: + pass # if owner was already a weakproxy then this will be the error raised + self.owner = owner self.code = code self.namespace = namespace self.variables = variables diff --git a/brian2/codegen/runtime/numpy_rt/numpy_rt.py b/brian2/codegen/runtime/numpy_rt/numpy_rt.py index da028de90..e3ab1468e 100644 --- a/brian2/codegen/runtime/numpy_rt/numpy_rt.py +++ b/brian2/codegen/runtime/numpy_rt/numpy_rt.py @@ -2,7 +2,8 @@ import numpy as np from brian2.core.preferences import brian_prefs, BrianPreference -from brian2.core.variables import Variable, Subexpression, DynamicArrayVariable +from brian2.core.variables import (Variable, Subexpression, + DynamicArrayVariable, ArrayVariable) from ...codeobject import CodeObject from ...templates import Templater @@ -34,10 +35,11 @@ class NumpyCodeObject(CodeObject): language = NumpyLanguage() class_name = 'numpy' - def __init__(self, code, namespace, variables, name='numpy_code_object*'): + def __init__(self, owner, code, namespace, variables, name='numpy_code_object*'): # TODO: This should maybe go somewhere else namespace['logical_not'] = np.logical_not - CodeObject.__init__(self, code, namespace, variables, 
name=name) + CodeObject.__init__(self, owner, code, namespace, variables, name=name) + namespace['_owner'] = self.owner def variables_to_namespace(self): # Variables can refer to values that are either constant (e.g. dt) @@ -49,20 +51,27 @@ def variables_to_namespace(self): self.nonconstant_values = [] for name, var in self.variables.iteritems(): - if isinstance(var, Variable) and not isinstance(var, Subexpression): - if not var.constant: - self.nonconstant_values.append((name, var.get_value)) - if isinstance(var, DynamicArrayVariable): - self.nonconstant_values.append((name+'_object', - var.get_object)) - else: - try: - value = var.get_value() - except TypeError: # A dummy Variable without value - continue - self.namespace[name] = value - if isinstance(var, DynamicArrayVariable): - self.namespace[name+'_object'] = var.get_object() + if isinstance(var, Subexpression): + continue + + if not var.constant: + self.nonconstant_values.append((name, var.get_value)) + if isinstance(var, ArrayVariable): + self.nonconstant_values.append((var.arrayname, + var.get_value)) + if isinstance(var, DynamicArrayVariable): + self.nonconstant_values.append((name+'_object', + var.get_object)) + else: + try: + value = var.get_value() + except TypeError: # A dummy Variable without value + continue + if isinstance(var, ArrayVariable): + self.namespace[var.arrayname] = var.get_value() + self.namespace[name] = value + if isinstance(var, DynamicArrayVariable): + self.namespace[var.name+'_object'] = var.get_object() def update_namespace(self): # update the values of the non-constant values in the namespace diff --git a/brian2/codegen/runtime/numpy_rt/templates/synapses_push_spikes.py_ b/brian2/codegen/runtime/numpy_rt/templates/synapses_push_spikes.py_ new file mode 100644 index 000000000..34e992d13 --- /dev/null +++ b/brian2/codegen/runtime/numpy_rt/templates/synapses_push_spikes.py_ @@ -0,0 +1 @@ +_owner.push_spikes() diff --git a/brian2/codegen/runtime/weave_rt/templates/synapses_push_spikes.cpp b/brian2/codegen/runtime/weave_rt/templates/synapses_push_spikes.cpp new file mode 100644 index 000000000..e75b133d1 --- /dev/null +++ b/brian2/codegen/runtime/weave_rt/templates/synapses_push_spikes.cpp @@ -0,0 +1,9 @@ +{% macro main() %} +{% endmacro %} + +{% macro support_code() %} +{% endmacro %} + +{% macro python_pre() %} +_owner.push_spikes() +{% endmacro %} diff --git a/brian2/codegen/runtime/weave_rt/weave_rt.py b/brian2/codegen/runtime/weave_rt/weave_rt.py index 35b01b3c4..110f23fd8 100644 --- a/brian2/codegen/runtime/weave_rt/weave_rt.py +++ b/brian2/codegen/runtime/weave_rt/weave_rt.py @@ -8,7 +8,8 @@ # No weave for Python 3 weave = None -from brian2.core.variables import Variable, Subexpression, DynamicArrayVariable +from brian2.core.variables import (Variable, Subexpression, + DynamicArrayVariable, ArrayVariable) from brian2.core.preferences import brian_prefs, BrianPreference from brian2.core.functions import DEFAULT_FUNCTIONS, FunctionImplementation @@ -68,10 +69,11 @@ class WeaveCodeObject(CodeObject): language = CPPLanguage(c_data_type=weave_data_type) class_name = 'weave' - def __init__(self, code, namespace, variables, name='weave_code_object*'): - super(WeaveCodeObject, self).__init__(code, namespace, variables, name=name) + def __init__(self, owner, code, namespace, variables, name='weave_code_object*'): + super(WeaveCodeObject, self).__init__(owner, code, namespace, variables, name=name) self.compiler = brian_prefs['codegen.runtime.weave.compiler'] self.extra_compile_args = 
brian_prefs['codegen.runtime.weave.extra_compile_args'] + self.python_code_namespace = {'_owner': owner} def variables_to_namespace(self): @@ -86,6 +88,8 @@ def variables_to_namespace(self): for name, var in self.variables.iteritems(): if isinstance(var, Variable) and not isinstance(var, Subexpression): if not var.constant: + if isinstance(var, ArrayVariable): + self.nonconstant_values.append((var.arrayname, var.get_value)) self.nonconstant_values.append((name, var.get_value)) if not var.scalar: self.nonconstant_values.append(('_num' + name, @@ -98,6 +102,8 @@ def variables_to_namespace(self): value = var.get_value() except TypeError: # A dummy Variable without value continue + if isinstance(var, ArrayVariable): + self.namespace[var.arrayname] = value self.namespace[name] = value # if it is a type that has a length, add a variable called # '_num'+name with its length @@ -111,13 +117,24 @@ def update_namespace(self): # update the values of the non-constant values in the namespace for name, func in self.nonconstant_values: self.namespace[name] = func() + + def compile(self): + CodeObject.compile(self) + if hasattr(self.code, 'python_pre'): + self.compiled_python_pre = compile(self.code.python_pre, '(string)', 'exec') + if hasattr(self.code, 'python_post'): + self.compiled_python_post = compile(self.code.python_post, '(string)', 'exec') def run(self): + if hasattr(self, 'compiled_python_pre'): + exec self.compiled_python_pre in self.python_code_namespace return weave.inline(self.code.main, self.namespace.keys(), local_dict=self.namespace, support_code=self.code.support_code, compiler=self.compiler, extra_compile_args=self.extra_compile_args) + if hasattr(self, 'compiled_python_post'): + exec self.compiled_python_post in self.python_code_namespace codegen_targets.add(WeaveCodeObject) diff --git a/brian2/core/clocks.py b/brian2/core/clocks.py index 30aa1217b..65580b1b3 100644 --- a/brian2/core/clocks.py +++ b/brian2/core/clocks.py @@ -8,7 +8,7 @@ from brian2.utils.logger import get_logger from brian2.core.names import Nameable -from brian2.units.fundamentalunits import check_units +from brian2.units.fundamentalunits import check_units, Quantity from brian2.units.allunits import second, msecond __all__ = ['Clock', 'defaultclock'] @@ -40,12 +40,12 @@ class Clock(Nameable): ''' @check_units(dt=second) - def __init__(self, dt=None, name='clock*'): - self._dt_spec = dt + def __init__(self, dt=0.1*msecond, name='clock*'): + self._dt = float(dt) self.i = 0 #: The time step of the simulation as an integer. 
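The python_pre/python_post hooks added to WeaveCodeObject above follow a simple pattern: an optional Python snippet taken from the template is compiled once and then exec'd in a small namespace that exposes the owning object as _owner (this is how the new synapses_push_spikes templates invoke _owner.push_spikes()). A minimal, self-contained sketch of that pattern, with illustrative names only, not the real class:

    class HookedCodeObject(object):
        # Stand-in illustrating the pre/post hook handling sketched above.
        def __init__(self, owner, python_pre=None, python_post=None):
            self.python_code_namespace = {'_owner': owner}
            # Compile the optional snippets once, as WeaveCodeObject.compile() does
            self.compiled_pre = compile(python_pre, '(string)', 'exec') if python_pre else None
            self.compiled_post = compile(python_post, '(string)', 'exec') if python_post else None

        def run(self):
            if self.compiled_pre is not None:
                exec self.compiled_pre in self.python_code_namespace
            # ... weave.inline(...) would execute the generated C++ here ...
            if self.compiled_post is not None:
                exec self.compiled_post in self.python_code_namespace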
self.i_end = 0 #: The time step the simulation will end as an integer Nameable.__init__(self, name=name) - logger.debug("Created clock {self.name} with dt={self._dt_spec}".format(self=self)) + logger.debug("Created clock {self.name} with dt={self._dt}".format(self=self)) def reinit(self): ''' @@ -77,18 +77,9 @@ def _set_t_end(self, end): self.i_end = int(float(end) / self.dt_) def _get_dt_(self): - if hasattr(self, '_dt'): - return self._dt - else: - dtspec = self._dt_spec - if dtspec is None: - dtspec = 0.1*msecond - self._dt = float(dtspec) - return self._dt + return self._dt def _set_dt_(self, dt_): - if hasattr(self, '_dt'): - raise RuntimeError("Cannot change dt, it has already been set to "+str(self.dt)) self._dt = dt_ logger.debug("Set dt for clock {self.name} to {self.dt}".format(self=self)) @@ -96,11 +87,9 @@ def _set_dt_(self, dt_): def _set_dt(self, dt): self.dt_ = float(dt) - dt = property(fget=lambda self: self.dt_*second, + dt = property(fget=lambda self: Quantity(self.dt_, dim=second.dim), fset=_set_dt, - doc='''The time step of the simulation in seconds - Returns a `Quantity`, and can only - be set once. Defaults to ``0.1*ms``.''', + doc='''The time step of the simulation in seconds.''', ) dt_ = property(fget=_get_dt_, fset=_set_dt_, diff --git a/brian2/core/variables.py b/brian2/core/variables.py index 704c8e92f..5c7c93643 100644 --- a/brian2/core/variables.py +++ b/brian2/core/variables.py @@ -109,7 +109,7 @@ def get_value(self): else: return self.value - def set_value(self): + def set_value(self, value, index=None): ''' Set the value associated with the variable. ''' @@ -145,6 +145,9 @@ def get_len(self): else: return len(self.get_value()) + def __len__(self): + return self.get_len() + def __repr__(self): description = ('<{classname}(unit={unit}, value={value}, ' 'dtype={dtype}, scalar={scalar}, constant={constant})>') @@ -415,8 +418,16 @@ def __init__(self, name, unit, value, group_name=None, constant=False, def get_value(self): return self.value - def set_value(self, value): - self.value[:] = value + def __getitem__(self, item): + return self.get_value()[item] + + def set_value(self, value, index=None): + if index is None: + index = slice(None) + self.value[index] = value + + def __setitem__(self, item, value): + self.set_value(value, item) def get_addressable_value(self, group, level=0): template = getattr(group, '_set_with_code_template', @@ -443,6 +454,9 @@ def get_value(self): def get_object(self): return self.value + def resize(self, new_size): + self.value.resize(new_size) + class Subexpression(Variable): ''' @@ -469,26 +483,14 @@ class Subexpression(Variable): Whether this is a boolean variable (also implies it is dimensionless). Defaults to ``False`` ''' - def __init__(self, unit, dtype, expr, variables, namespace, - is_bool=False): + def __init__(self, unit, dtype, expr, is_bool=False): Variable.__init__(self, unit, value=None, dtype=dtype, constant=False, scalar=False, is_bool=is_bool) #: The expression defining the static equation. self.expr = expr.strip() #: The identifiers used in the expression - self.identifiers = get_identifiers(expr) - #: Specifiers for the identifiers used in the expression - self.variables = variables - - #: The NeuronGroup's namespace for the identifiers used in the - #: expression - self.namespace = namespace - - #: An additional namespace provided by the run function (and updated - #: in `NeuronGroup.before_run`) that is used if the NeuronGroup does not - #: have an explicitly defined namespace. 
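The item-access protocol added to ArrayVariable above simply delegates len() and indexing to get_value()/set_value(). Reduced to its essentials (a simplified stand-in, not the full Brian2 class):

    class SimpleArrayVariable(object):
        def __init__(self, value):
            self.value = value

        def get_value(self):
            return self.value

        def set_value(self, value, index=None):
            if index is None:
                index = slice(None)
            self.value[index] = value

        def __len__(self):
            return len(self.get_value())

        def __getitem__(self, item):
            return self.get_value()[item]

        def __setitem__(self, item, value):
            self.set_value(value, item)

With this in place, code elsewhere in the diff can treat the variable object itself like an array, e.g. the spikes properties below index self.variables['_spikespace'] directly.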
- self.additional_namespace = None + self.identifiers = get_identifiers(expr) def get_value(self): raise AssertionError('get_value should never be called for a Subexpression') diff --git a/brian2/devices/cpp_standalone/device.py b/brian2/devices/cpp_standalone/device.py index 57f6c5358..70e43fa03 100644 --- a/brian2/devices/cpp_standalone/device.py +++ b/brian2/devices/cpp_standalone/device.py @@ -3,14 +3,12 @@ import inspect from collections import defaultdict -from brian2.units import second from brian2.core.clocks import defaultclock -from brian2.devices.device import Device, set_device, all_devices +from brian2.devices.device import Device, all_devices from brian2.core.preferences import brian_prefs from brian2.core.variables import * from brian2.utils.filetools import copy_directory from brian2.utils.stringtools import word_substitute -from brian2.memory.dynamicarray import DynamicArray, DynamicArray1D from brian2.codegen.languages.cpp_lang import c_data_type from brian2.codegen.codeobject import CodeObjectUpdater @@ -25,41 +23,102 @@ def freeze(code, ns): code = word_substitute(code, {k: repr(v)}) return code +class StandaloneVariableView(): + ''' + Will store information about how the variable was set in the original + `ArrayVariable` object. + ''' + def __init__(self, variable): + self.variable = variable + + def __setitem__(self, key, value): + self.variable.assignments.append((key, value)) + + def __getitem__(self, item): + raise NotImplementedError() + +class StandaloneArrayVariable(ArrayVariable): + + def __init__(self, name, unit, size, dtype, group_name=None, constant=False, + is_bool=False): + self.assignments = [] + self.size = size + super(StandaloneArrayVariable, self).__init__(name, unit, value=None, + group_name=group_name, + constant=constant, + is_bool=is_bool) + self.dtype = dtype + + def get_len(self): + return self.size + + def get_value(self): + raise NotImplementedError() + + def set_value(self, value, index=None): + if index is None: + index = slice(None) + self.assignments.append((index, value)) + + def get_addressable_value(self, group, level=0): + return StandaloneVariableView(self) + + def get_addressable_value_with_unit(self, group, level=0): + return StandaloneVariableView(self) + + +class StandaloneDynamicArrayVariable(StandaloneArrayVariable): + + def resize(self, new_size): + self.assignments.append(('resize', new_size)) + class CPPStandaloneDevice(Device): ''' ''' def __init__(self): - self.arrays = {} - self.dynamic_arrays = {} + self.array_specs = [] + self.dynamic_array_specs = [] self.code_objects = {} - def array(self, owner, name, size, unit, dtype=None): - if dtype is None: + def array(self, owner, name, size, unit, dtype=None, constant=False, + is_bool=False): + if is_bool: + dtype = numpy.bool + elif dtype is None: dtype = brian_prefs['core.default_scalar_dtype'] - arr = numpy.zeros(size, dtype=dtype) - self.arrays['_array_%s_%s' % (owner.name, name)] = arr - return arr - - def dynamic_array_1d(self, owner, name, size, unit, dtype): - if dtype is None: + self.array_specs.append(('_array_%s_%s' % (owner.name, name), + c_data_type(dtype), size)) + return StandaloneArrayVariable(name, unit, size=size, dtype=dtype, + group_name=owner.name, + constant=constant, is_bool=is_bool) + + def dynamic_array_1d(self, owner, name, size, unit, dtype=None, + constant=False, is_bool=False): + if is_bool: + dtype = numpy.bool + elif dtype is None: dtype = brian_prefs['core.default_scalar_dtype'] - arr = DynamicArray1D(size, dtype=dtype) - 
self.dynamic_arrays['_dynamic_array_%s_%s' % (owner.name, name)] = arr - return arr + self.dynamic_array_specs.append(('_dynamic_array_%s_%s' % (owner.name, name), + c_data_type(dtype))) + return StandaloneDynamicArrayVariable(name, unit, size=size, + dtype=dtype, + group_name=owner.name, + constant=constant, is_bool=is_bool) - def dynamic_array(self): - raise NotImplentedError + def dynamic_array(self, owner, name, size, unit, dtype=None, + constant=False, is_bool=False): + raise NotImplementedError() def code_object_class(self, codeobj_class=None): if codeobj_class is not None: raise ValueError("Cannot specify codeobj_class for C++ standalone device.") return CPPStandaloneCodeObject - def code_object(self, name, abstract_code, namespace, variables, template_name, + def code_object(self, owner, name, abstract_code, namespace, variables, template_name, indices, variable_indices, codeobj_class=None, template_kwds=None): - codeobj = super(CPPStandaloneDevice, self).code_object(name, abstract_code, namespace, variables, + codeobj = super(CPPStandaloneDevice, self).code_object(owner, name, abstract_code, namespace, variables, template_name, indices, variable_indices, codeobj_class=codeobj_class, template_kwds=template_kwds, @@ -86,11 +145,9 @@ def build(self, net): if not os.path.exists('output'): os.mkdir('output') - # Write the arrays - array_specs = [(k, c_data_type(v.dtype), len(v)) for k, v in self.arrays.iteritems()] - dynamic_array_specs = [(k, c_data_type(v.dtype)) for k, v in self.dynamic_arrays.iteritems()] - arr_tmp = CPPStandaloneCodeObject.templater.arrays(None, array_specs=array_specs, - dynamic_array_specs=dynamic_array_specs) + # Write the arrays + arr_tmp = CPPStandaloneCodeObject.templater.arrays(None, array_specs=self.array_specs, + dynamic_array_specs=self.dynamic_array_specs) open('output/arrays.cpp', 'w').write(arr_tmp.cpp_file) open('output/arrays.h', 'w').write(arr_tmp.h_file) @@ -103,9 +160,9 @@ def build(self, net): elif isinstance(v, Subexpression): pass elif not v.scalar: - N = v.get_len() + N = len(v) code_object_defs[codeobj.name].append('const int _num%s = %s;' % (k, N)) - if isinstance(v, DynamicArrayVariable): + if isinstance(v, StandaloneDynamicArrayVariable): c_type = c_data_type(v.dtype) # Create an alias name for the underlying array code = ('{c_type}* {arrayname} = ' diff --git a/brian2/devices/device.py b/brian2/devices/device.py index 3d1efefae..26aab1458 100644 --- a/brian2/devices/device.py +++ b/brian2/devices/device.py @@ -1,8 +1,10 @@ -import numpy +import numpy as np + from brian2.memory.dynamicarray import DynamicArray, DynamicArray1D from brian2.codegen.codeobject import create_codeobject from brian2.codegen.targets import codegen_targets from brian2.core.preferences import brian_prefs +from brian2.core.variables import ArrayVariable, DynamicArrayVariable __all__ = ['Device', 'RuntimeDevice', 'get_device', 'set_device', @@ -36,25 +38,28 @@ class Device(object): def __init__(self): pass - def create_array(self, owner, name, size, unit, dtype=None): - pass + def array(self, owner, name, size, unit, dtype=None, constant=False, + is_bool=False): + raise NotImplementedError() - def create_dynamic_array_1d(self, owner, name, size, unit, dtype=None): - pass + def dynamic_array_1d(self, owner, name, size, unit, dtype=None, + constant=False, is_bool=False): + raise NotImplementedError() + + def dynamic_array(self, owner, name, size, unit, dtype=None, + constant=False, is_bool=False): + raise NotImplementedError() - def create_dynamic_array(self, owner, 
name, size, unit, dtype=None): - pass - def code_object_class(self, codeobj_class=None): if codeobj_class is None: codeobj_class = get_default_codeobject_class() return codeobj_class - def code_object(self, name, abstract_code, namespace, variables, template_name, + def code_object(self, owner, name, abstract_code, namespace, variables, template_name, indices, variable_indices, codeobj_class=None, template_kwds=None): codeobj_class = self.code_object_class(codeobj_class) - return create_codeobject(name, abstract_code, namespace, variables, template_name, + return create_codeobject(owner, name, abstract_code, namespace, variables, template_name, indices, variable_indices, codeobj_class=codeobj_class, template_kwds=template_kwds) @@ -69,22 +74,37 @@ class RuntimeDevice(Device): ''' ''' def __init__(self): - pass + super(Device, self).__init__() - def array(self, owner, name, size, unit, dtype=None): - if dtype is None: + def array(self, owner, name, size, unit, dtype=None, + constant=False, is_bool=False): + if is_bool: + dtype = np.bool + elif dtype is None: dtype = brian_prefs['core.default_scalar_dtype'] - return numpy.zeros(size, dtype=dtype) - - def dynamic_array_1d(self, owner, name, size, unit, dtype): + array = np.zeros(size, dtype=dtype) + return ArrayVariable(name, unit, array, group_name=owner.name, + constant=constant, is_bool=is_bool) + + def dynamic_array_1d(self, owner, name, size, unit, dtype=None, + constant=False, is_bool=False): + if is_bool: + dtype = np.bool if dtype is None: dtype = brian_prefs['core.default_scalar_dtype'] - return DynamicArray1D(size, dtype=dtype) - - def dynamic_array(self, owner, name, size, unit, dtype): + array = DynamicArray1D(size, dtype=dtype) + return DynamicArrayVariable(name, unit, array, group_name=owner.name, + constant=constant, is_bool=is_bool) + + def dynamic_array(self, owner, name, size, unit, dtype=None, + constant=False, is_bool=False): + if is_bool: + dtype = np.bool if dtype is None: dtype = brian_prefs['core.default_scalar_dtype'] - return DynamicArray(size, dtype=dtype) + array = DynamicArray(size, dtype=dtype) + return DynamicArrayVariable(name, unit, array, group_name=owner.name, + constant=constant, is_bool=is_bool) runtime_device = RuntimeDevice() diff --git a/brian2/groups/group.py b/brian2/groups/group.py index 6198f188b..ee52674b7 100644 --- a/brian2/groups/group.py +++ b/brian2/groups/group.py @@ -13,7 +13,6 @@ from brian2.core.namespace import get_local_namespace from brian2.units.fundamentalunits import fail_for_dimension_mismatch, Unit from brian2.units.allunits import second -from brian2.codegen.codeobject import create_codeobject from brian2.codegen.translation import analyse_identifiers from brian2.equations.unitcheck import check_units_statements from brian2.utils.logger import get_logger @@ -77,14 +76,14 @@ def __getitem__(self, index): return self._indices[index_array + self.offset] -class Group(object): +class Group(BrianObject): ''' Mix-in class for accessing arrays by attribute. 
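A consequence of the RuntimeDevice changes in devices/device.py above is that device.array()/dynamic_array_1d() now hand back ArrayVariable/DynamicArrayVariable objects instead of bare numpy arrays. A hedged usage sketch against this development snapshot (the dummy owner only needs a .name attribute here):

    import numpy as np
    from brian2.devices.device import RuntimeDevice
    from brian2.units.fundamentalunits import Unit

    class DummyOwner(object):
        name = 'neurongroup_0'   # RuntimeDevice.array() only reads owner.name

    device = RuntimeDevice()
    N = 10
    # Previously a plain numpy array; now an ArrayVariable supporting the
    # __len__/__getitem__/__setitem__ protocol added in core/variables.py.
    spikespace = device.array(DummyOwner(), '_spikespace', N + 1, Unit(1),
                              dtype=np.int32)
    spikespace[-1] = 0
    print len(spikespace)        # N + 1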
# TODO: Overwrite the __dir__ method to return the state variables # (should make autocompletion work) ''' - def __init__(self): + def _enable_group_attributes(self): if not hasattr(self, 'offset'): self.offset = 0 if not hasattr(self, 'variables'): @@ -159,7 +158,7 @@ def __getattr__(self, name): def __setattr__(self, name, val): # attribute access is switched off until this attribute is created by - # Group.__init__ + # _enable_group_attributes if not hasattr(self, '_group_attribute_access_active'): object.__setattr__(self, name, val) elif name in self.variables: @@ -168,12 +167,12 @@ def __setattr__(self, name, val): fail_for_dimension_mismatch(val, var.unit, 'Incorrect units for setting %s' % name) # Make the call X.var = ... equivalent to X.var[:] = ... - var.get_addressable_value_with_unit(self, level=1)[:] = val + var.get_addressable_value_with_unit(self, level=1)[slice(None)] = val elif len(name) and name[-1]=='_' and name[:-1] in self.variables: # no unit checking var = self.variables[name[:-1]] # Make the call X.var = ... equivalent to X.var[:] = ... - var.get_addressable_value(self, level=1)[:] = val + var.get_addressable_value(self, level=1)[slice(None)] = val else: object.__setattr__(self, name, val) @@ -383,6 +382,7 @@ def create_runner_codeobj(group, code, template_name, indices=None, variable_indices = group.variable_indices return get_device().code_object( + group, name, code, resolved_namespace, diff --git a/brian2/groups/neurongroup.py b/brian2/groups/neurongroup.py index 91ba5c805..4a3a1cb82 100644 --- a/brian2/groups/neurongroup.py +++ b/brian2/groups/neurongroup.py @@ -1,6 +1,8 @@ ''' This model defines the `NeuronGroup`, the core of most simulations. ''' +from collections import defaultdict + import numpy as np import sympy @@ -10,10 +12,8 @@ from brian2.stateupdaters.base import StateUpdateMethod from brian2.devices.device import get_device from brian2.core.preferences import brian_prefs -from brian2.core.base import BrianObject from brian2.core.namespace import create_namespace -from brian2.core.variables import (ArrayVariable, StochasticVariable, - Subexpression) +from brian2.core.variables import (StochasticVariable, Subexpression) from brian2.core.spikesource import SpikeSource from brian2.core.scheduler import Scheduler from brian2.parsing.expressions import (parse_expression_unit, @@ -145,7 +145,7 @@ def update_abstract_code(self): self.abstract_code = self.group.reset -class NeuronGroup(BrianObject, Group, SpikeSource): +class NeuronGroup(Group, SpikeSource): ''' A group of neurons. @@ -178,9 +178,10 @@ class NeuronGroup(BrianObject, Group, SpikeSource): the local and global namespace surrounding the creation of the class, is used. dtype : (`dtype`, `dict`), optional - The `numpy.dtype` that will be used to store the values, or - `core.default_scalar_dtype` if not specified (`numpy.float64` by - default). + The `numpy.dtype` that will be used to store the values, or a + dictionary specifying the type for variable names. If a value is not + provided for a variable (or no value is provided at all), the preference + setting `core.default_scalar_dtype` is used. codeobj_class : class, optional The `CodeObject` class to run code with. 
clock : Clock, optional @@ -204,7 +205,7 @@ def __init__(self, N, model, method=None, dtype=None, clock=None, name='neurongroup*', codeobj_class=None): - BrianObject.__init__(self, when=clock, name=name) + Group.__init__(self, when=clock, name=name) self.codeobj_class = codeobj_class @@ -238,16 +239,11 @@ def __init__(self, N, model, method=None, logger.debug("Creating NeuronGroup of size {self.N}, " "equations {self.equations}.".format(self=self)) - ##### Setup the memory - self.arrays = self._allocate_memory(dtype=dtype) - - self._spikespace = get_device().array(self, '_spikespace', N+1, 1, dtype=np.int32) - # Setup the namespace self.namespace = create_namespace(namespace) # Setup variables - self.variables = self._create_variables() + self.variables = self._create_variables(dtype) # All of the following will be created in before_run @@ -296,7 +292,7 @@ def __init__(self, N, model, method=None, self.contained_objects.append(self.resetter) # Activate name attribute access - Group.__init__(self) + self._enable_group_attributes() # Set the refractoriness information self.lastspike = -np.inf*second @@ -313,7 +309,10 @@ def spikes(self): ''' The spikes returned by the most recent thresholding operation. ''' - return self._spikespace[:self._spikespace[-1]] + # Note that we have to directly access the ArrayVariable object here + # instead of using the Group mechanism by accessing self._spikespace + # Using the latter would cut _spikespace to the length of the group + return self.variables['_spikespace'][:self.variables['_spikespace'][-1]] def __getitem__(self, item): if not isinstance(item, slice): @@ -327,29 +326,6 @@ def __getitem__(self, item): return Subgroup(self, start, stop) - def _allocate_memory(self, dtype=None): - # Allocate memory (TODO: this should be refactored somewhere at some point) - - arrays = {} - for eq in self.equations.itervalues(): - if eq.type == STATIC_EQUATION: - # nothing to do - continue - name = eq.varname - if isinstance(dtype, dict): - curdtype = dtype[name] - else: - curdtype = dtype - if curdtype is None: - curdtype = brian_prefs['core.default_scalar_dtype'] - if eq.is_bool: - arrays[name] = get_device().array(self, name, self.N, 1, dtype=np.bool) - else: - # TODO: specify unit here - arrays[name] = get_device().array(self, name, self.N, 1, dtype=curdtype) - logger.debug("NeuronGroup memory allocated successfully.") - return arrays - def runner(self, code, when=None, name=None): ''' Returns a `CodeRunner` that runs abstract code in the groups namespace @@ -379,37 +355,41 @@ def runner(self, code, when=None, name=None): code=code, name=name, when=when) return runner - def _create_variables(self): + def _create_variables(self, dtype=None): ''' Create the variables dictionary for this `NeuronGroup`, containing entries for the equation variables and some standard entries. 
''' + device = get_device() # Get the standard variables for all groups s = Group._create_variables(self) + if dtype is None: + dtype = defaultdict(lambda: brian_prefs['core.default_scalar_dtype']) + elif isinstance(dtype, np.dtype): + dtype = defaultdict(lambda: dtype) + elif not hasattr(dtype, '__getitem__'): + raise TypeError(('Cannot use type %s as dtype ' + 'specification') % type(dtype)) + # Standard variables always present - s.update({'_spikespace': ArrayVariable('_spikespace', Unit(1), - self._spikespace, - group_name=self.name)}) + s['_spikespace'] = device.array(self, '_spikespace', self.N+1, + Unit(1), dtype=np.int32, + constant=False) for eq in self.equations.itervalues(): if eq.type in (DIFFERENTIAL_EQUATION, PARAMETER): - array = self.arrays[eq.varname] constant = ('constant' in eq.flags) - s.update({eq.varname: ArrayVariable(eq.varname, - eq.unit, - array, - group_name=self.name, - constant=constant, - is_bool=eq.is_bool)}) + s[eq.varname] = device.array(self, eq.varname, self.N, eq.unit, + dtype=dtype[eq.varname], + constant=constant, + is_bool=eq.is_bool) elif eq.type == STATIC_EQUATION: - s.update({eq.varname: Subexpression(eq.unit, - brian_prefs['core.default_scalar_dtype'], - str(eq.expr), - variables=s, - namespace=self.namespace, - is_bool=eq.is_bool)}) + s[eq.varname] = Subexpression(eq.unit, + brian_prefs['core.default_scalar_dtype'], + str(eq.expr), + is_bool=eq.is_bool) else: raise AssertionError('Unknown type of equation: ' + eq.eq_type) diff --git a/brian2/groups/poissongroup.py b/brian2/groups/poissongroup.py index 10f9c323c..82a7d40b9 100644 --- a/brian2/groups/poissongroup.py +++ b/brian2/groups/poissongroup.py @@ -1,13 +1,11 @@ import numpy as np -from brian2.core.base import BrianObject from brian2.core.namespace import create_namespace from brian2.core.spikesource import SpikeSource from brian2.core.variables import ArrayVariable from brian2.devices.device import get_device from brian2.equations import Equations -from brian2.units.fundamentalunits import check_units, Unit -from brian2.units.allunits import second +from brian2.units.fundamentalunits import check_units from brian2.units.stdunits import Hz from .group import Group @@ -16,7 +14,7 @@ __all__ = ['PoissonGroup'] -class PoissonGroup(Group, BrianObject, SpikeSource): +class PoissonGroup(Group, SpikeSource): ''' Poisson spike source @@ -41,13 +39,11 @@ class PoissonGroup(Group, BrianObject, SpikeSource): def __init__(self, N, rates, clock=None, name='poissongroup*', codeobj_class=None): - BrianObject.__init__(self, when=clock, name=name) + Group.__init__(self, when=clock, name=name) self.codeobj_class = codeobj_class self.N = N = int(N) - #: The array holding the spikes - self._spikespace = get_device().array(self, '_spikespace', N+1, 1, dtype=np.int32) #: The array holding the rates self._rates = np.asarray(rates) @@ -61,27 +57,22 @@ def __init__(self, N, rates, clock=None, name='poissongroup*', # users write their own NeuronGroup (with threshold rand() < rates*dt) # for more complex use cases. 
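The _spikespace convention relied on by the spikes properties in this diff packs both the spiking indices and their count into a single integer array of length N+1: the first entries hold the indices of neurons that spiked in the current step, and the last entry holds how many there are. A small numpy illustration of that layout (assumed from how the properties slice the array):

    import numpy as np

    N = 5
    spikespace = np.zeros(N + 1, dtype=np.int32)
    spiking = np.array([1, 3], dtype=np.int32)   # neurons 1 and 3 spiked this step
    spikespace[:len(spiking)] = spiking
    spikespace[-1] = len(spiking)                # the count lives in the last slot
    spikes = spikespace[:spikespace[-1]]         # -> array([1, 3]), what .spikes returns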
- #: The array storing the refractoriness information (not used, currently) - self._not_refractory = get_device().array(self, '_not_refractory', N, 1, - dtype=np.bool) - self._lastspike = get_device().array(self, '_lastspike', N, 1) - self.variables = Group._create_variables(self) self.variables.update({'rates': ArrayVariable('rates', Hz, self._rates, group_name=self.name, constant=True), - '_spikespace': ArrayVariable('_spikespace', Unit(1), - self._spikespace, - group_name=self.name), - 'not_refractory': ArrayVariable('not_refractory', - Unit(1), - self._not_refractory, - group_name=self.name, - is_bool=True), - 'lastspike': ArrayVariable('lastspike', - second, - self._lastspike, - group_name=self.name)}) + '_spikespace': get_device().array(self, + '_spikespace', + N+1, + 1, + dtype=np.int32), + 'not_refractory': get_device().array(self, + '_not_refractory', + N, 1, + dtype=np.bool), + 'lastspike': get_device().array(self, + '_lastspike', + N, 1)}) self.namespace = create_namespace(None) @@ -95,14 +86,17 @@ def __init__(self, N, rates, clock=None, name='poissongroup*', self._refractory = False self.state_updater = StateUpdater(self, method='independent') self.contained_objects.append(self.state_updater) - Group.__init__(self) + self._enable_group_attributes() @property def spikes(self): ''' The spikes returned by the most recent thresholding operation. ''' - return self._spikespace[:self._spikespace[-1]] + # Note that we have to directly access the ArrayVariable object here + # instead of using the Group mechanism by accessing self._spikespace + # Using the latter would cut _spikespace to the length of the group + return self.variables['_spikespace'][:self.variables['_spikespace'][-1]] def __len__(self): return self.N diff --git a/brian2/groups/subgroup.py b/brian2/groups/subgroup.py index 906dcf978..be5b8971d 100644 --- a/brian2/groups/subgroup.py +++ b/brian2/groups/subgroup.py @@ -1,8 +1,5 @@ import weakref -import numpy as np - -from brian2.core.base import BrianObject from brian2.core.spikesource import SpikeSource from brian2.core.scheduler import Scheduler from brian2.groups.group import Group @@ -10,7 +7,7 @@ __all__ = ['Subgroup'] -class Subgroup(Group, BrianObject, SpikeSource): +class Subgroup(Group, SpikeSource): ''' Subgroup of any `Group` @@ -45,7 +42,7 @@ def __init__(self, source, start, end, name=None): # parent threshold operation schedule = Scheduler(clock=source.clock, when='thresholds', order=source.order+1) - BrianObject.__init__(self, when=schedule, name=name) + Group.__init__(self, when=schedule, name=name) self.N = end-start self.start = start self.end = end @@ -56,7 +53,7 @@ def __init__(self, source, start, end, name=None): self.namespace = self.source.namespace self.codeobj_class = self.source.codeobj_class - Group.__init__(self) + self._enable_group_attributes() # Make the spikes from the source group accessible spikes = property(lambda self: self.source.spikes) diff --git a/brian2/monitors/ratemonitor.py b/brian2/monitors/ratemonitor.py index e50dc5b8f..807da39c3 100644 --- a/brian2/monitors/ratemonitor.py +++ b/brian2/monitors/ratemonitor.py @@ -4,10 +4,8 @@ import numpy as np from brian2.core.base import BrianObject -from brian2.core.preferences import brian_prefs from brian2.core.scheduler import Scheduler -from brian2.core.variables import (Variable, AttributeVariable, - DynamicArrayVariable) +from brian2.core.variables import (Variable, AttributeVariable) from brian2.units.allunits import second, hertz from brian2.units.fundamentalunits import Unit, 
Quantity from brian2.devices.device import get_device @@ -47,34 +45,27 @@ def __init__(self, source, when=None, name='ratemonitor*', self.codeobj_class = codeobj_class BrianObject.__init__(self, when=scheduler, name=name) - # create data structures - self.reinit() - + dev = get_device() self.variables = {'t': AttributeVariable(second, self.clock, 't_'), - 'dt': AttributeVariable(second, self.clock, - 'dt_', constant=True), + 'dt': AttributeVariable(second, self.clock, + 'dt_', constant=True), '_spikespace': self.source.variables['_spikespace'], - '_rate': DynamicArrayVariable('_rate', Unit(1), - self._rate, - group_name=self.name), - '_t': DynamicArrayVariable('_t', Unit(1), - self._t, - group_name=self.name), - '_num_source_neurons': Variable(Unit(1), - len(self.source))} + '_rate': dev.dynamic_array_1d(self, '_rate', 0, 1), + '_t': dev.dynamic_array_1d(self, '_t', 0, second, + dtype=getattr(self.clock.t, 'dtype', + np.dtype(type(self.clock.t)))), + '_num_source_neurons': Variable(Unit(1), + len(self.source))} def reinit(self): ''' Clears all recorded rates ''' - dev = get_device() - self._rate = dev.dynamic_array_1d(self, '_rate', 0, 1, dtype=brian_prefs['core.default_scalar_dtype']) - self._t = dev.dynamic_array_1d(self, '_t', 0, second, - dtype=getattr(self.clock.t, 'dtype', - np.dtype(type(self.clock.t)))) + raise NotImplementedError() def before_run(self, namespace): self.codeobj = get_device().code_object( + self, self.name+'_codeobject*', '', # No model-specific code {}, # no namespace @@ -90,28 +81,30 @@ def rate(self): ''' Array of recorded rates (in units of Hz). ''' - return Quantity(self._rate.data.copy(), dim=hertz.dim) + return Quantity(self.variables['_rate'].get_value(), dim=hertz.dim, + copy=True) @property def rate_(self): ''' Array of recorded rates (unitless). ''' - return self._rate.data.copy() + return self.variables['_rate'].get_value().copy() @property def t(self): ''' Array of recorded time points (in units of second). ''' - return Quantity(self._t.data.copy(), dim=second.dim) + return Quantity(self.variables['_t'].get_value(), dim=second.dim, + copy=True) @property def t_(self): ''' Array of recorded time points (unitless). 
''' - return self._t.data.copy() + return self.variables['_t'].get_value().copy() def __repr__(self): description = '<{classname}, recording {source}>' diff --git a/brian2/monitors/spikemonitor.py b/brian2/monitors/spikemonitor.py index 25ce4bb68..1fbca0abf 100644 --- a/brian2/monitors/spikemonitor.py +++ b/brian2/monitors/spikemonitor.py @@ -3,13 +3,11 @@ import numpy as np -from brian2.codegen.codeobject import create_codeobject from brian2.core.base import BrianObject -from brian2.core.preferences import brian_prefs from brian2.core.scheduler import Scheduler -from brian2.core.variables import ArrayVariable, AttributeVariable, Variable, DynamicArrayVariable +from brian2.core.variables import AttributeVariable, Variable from brian2.units.allunits import second -from brian2.units.fundamentalunits import Unit +from brian2.units.fundamentalunits import Unit, Quantity from brian2.devices.device import get_device __all__ = ['SpikeMonitor'] @@ -49,19 +47,22 @@ def __init__(self, source, record=True, when=None, name='spikemonitor*', self.codeobj_class = codeobj_class BrianObject.__init__(self, when=scheduler, name=name) - - # create data structures - self.reinit() # Handle subgroups correctly start = getattr(self.source, 'start', 0) end = getattr(self.source, 'end', len(self.source)) + device = get_device() self.variables = {'t': AttributeVariable(second, self.clock, 't_'), '_spikespace': self.source.variables['_spikespace'], - '_i': DynamicArrayVariable('_i', Unit(1), self._i, group_name=self.name), - '_t': DynamicArrayVariable('_t', Unit(1), self._t, group_name=self.name), - '_count': ArrayVariable('_count', Unit(1), self.count, group_name=self.name), + '_i': device.dynamic_array_1d(self, '_i', 0, Unit(1), + dtype=np.int32), + '_t': device.dynamic_array_1d(self, '_t', 0, + Unit(1)), + '_count': device.array(self, '_count', + len(self.source), + Unit(1), + dtype=np.int32), '_source_start': Variable(Unit(1), start, constant=True), '_source_end': Variable(Unit(1), end, @@ -71,15 +72,12 @@ def reinit(self): ''' Clears all recorded spikes ''' - dev = get_device() - self._i = dev.dynamic_array_1d(self, '_i', 0, 1, dtype=np.int32) - self._t = dev.dynamic_array_1d(self, '_t', 0, 1, dtype=brian_prefs['core.default_scalar_dtype']) - - #: Array of the number of times each source neuron has spiked - self.count = get_device().array(self, '_count', len(self.source), 1, dtype=np.int32) + raise NotImplementedError() def before_run(self, namespace): - self.codeobj = get_device().code_object(self.name+'_codeobject*', + self.codeobj = get_device().code_object( + self, + self.name+'_codeobject*', '', # No model-specific code {}, # no namespace self.variables, @@ -95,21 +93,22 @@ def i(self): ''' Array of recorded spike indices, with corresponding times `t`. ''' - return self._i.data.copy() + return self.variables['_i'].get_value().copy() @property def t(self): ''' Array of recorded spike times, with corresponding indices `i`. ''' - return self._t.data.copy()*second + return Quantity(self.variables['_t'].get_value(), dim=second.dim, + copy=True) @property def t_(self): ''' Array of recorded spike times without units, with corresponding indices `i`. ''' - return self._t.data.copy() + return self.variables['_t'].get_value().copy() @property def it(self): @@ -124,7 +123,14 @@ def it_(self): Returns the pair (`i`, `t_`). ''' return self.i, self.t_ - + + @property + def count(self): + ''' + Return the total spike count for each neuron. 
+ ''' + return self.variables['_count'].get_value().copy() + @property def num_spikes(self): ''' diff --git a/brian2/monitors/statemonitor.py b/brian2/monitors/statemonitor.py index 001be2f7e..7e620f225 100644 --- a/brian2/monitors/statemonitor.py +++ b/brian2/monitors/statemonitor.py @@ -3,13 +3,11 @@ import numpy as np -from brian2.core.variables import (Variable, AttributeVariable, ArrayVariable, - DynamicArrayVariable) +from brian2.core.variables import (AttributeVariable, ArrayVariable) from brian2.core.base import BrianObject from brian2.core.scheduler import Scheduler -from brian2.core.preferences import brian_prefs from brian2.devices.device import get_device -from brian2.units.fundamentalunits import Unit, Quantity, have_same_dimensions +from brian2.units.fundamentalunits import Unit, Quantity from brian2.units.allunits import second from brian2.groups.group import create_runner_codeobj @@ -35,16 +33,18 @@ def __getattr__(self, item): if not hasattr(self, '_group_attribute_access_active'): raise AttributeError + mon = self.monitor if item == 't': - return Quantity(self.monitor._t.data.copy(), dim=second.dim) + return Quantity(mon.variables['_t'].get_value(), dim=second.dim, + copy=True) elif item == 't_': - return self.monitor._t.data.copy() - elif item in self.monitor.record_variables: - unit = self.monitor.variables[item].unit - return Quantity(self.monitor._values[item].data.T[self.indices].copy(), - dim=unit.dim) - elif item.endswith('_') and item[:-1] in self.monitor.record_variables: - return self.monitor._values[item[:-1]].data.T[self.indices].copy() + return mon._t.data.copy() + elif item in mon.record_variables: + unit = mon.variables[item].unit + return Quantity(mon.variables['_recorded_'+item].get_value().T[self.indices], + dim=unit.dim, copy=True) + elif item.endswith('_') and item[:-1] in mon.record_variables: + return mon.variables['_recorded_'+item[:-1]].get_value().T[self.indices].copy() else: raise AttributeError('Unknown attribute %s' % item) @@ -168,10 +168,9 @@ def __init__(self, source, variables, record=None, when=None, #: The array of recorded indices self.indices = record - # create data structures - self.reinit() # Setup variables + device = get_device() self.variables = {} for varname in variables: var = source.variables[varname] @@ -182,13 +181,15 @@ def __init__(self, source, variables, record=None, when=None, 'doubles can be recorded.') % (varname, var.dtype)) self.variables[varname] = var - self.variables['_recorded_'+varname] = DynamicArrayVariable('_recorded_'+varname, - Unit(1), - self._values[varname], - group_name=self.name) + self.variables['_recorded_'+varname] = device.dynamic_array(self, + '_recorded_'+varname, + (0, len(self.indices)), + var.unit, + dtype=var.dtype, + constant=False) - self.variables['_t'] = DynamicArrayVariable('_t', Unit(1), self._t, - group_name=self.name) + self.variables['_t'] = device.dynamic_array_1d(self, '_t', 0, Unit(1), + constant=False) self.variables['_clock_t'] = AttributeVariable(second, self.clock, 't_') self.variables['_indices'] = ArrayVariable('_indices', Unit(1), self.indices, @@ -197,16 +198,7 @@ def __init__(self, source, variables, record=None, when=None, self._group_attribute_access_active = True def reinit(self): - dev = get_device() - vars = self.source.variables - self._values = dict((v, dev.dynamic_array(self, '_values', (0, len(self.indices)), - vars[v].unit, - dtype=vars[v].dtype)) - for v in self.record_variables) - self._t = dev.dynamic_array_1d(self, '_t', 0, second, - 
dtype=brian_prefs['core.default_scalar_dtype']) - # FIXME: This does not update the variables dictionary with the new - # references + raise NotImplementedError() def before_run(self, namespace): # Some dummy code so that code generation takes care of the indexing @@ -254,18 +246,16 @@ def __getattr__(self, item): # TODO: Decide about the interface if item == 't': - return Quantity(self._t.data.copy(), dim=second.dim) + return Quantity(self.variables['_t'].get_value(), + dim=second.dim, copy=True) elif item == 't_': - return self._t.data.copy() + return self.variables['_t'].get_value().copy() elif item in self.record_variables: unit = self.variables[item].unit - if have_same_dimensions(unit, 1): - return self._values[item].data.T.copy() - else: - return Quantity(self._values[item].data.T.copy(), - dim=unit.dim) + return Quantity(self.variables['_recorded_'+item].get_value().T, + dim=unit.dim, copy=True) elif item.endswith('_') and item[:-1] in self.record_variables: - return self._values[item[:-1]].data.T.copy() + return self.variables['_recorded_'+item[:-1]].get_value().T else: raise AttributeError('Unknown attribute %s' % item) diff --git a/brian2/synapses/spikequeue.py b/brian2/synapses/spikequeue.py index 60305bcbf..190d73d5f 100644 --- a/brian2/synapses/spikequeue.py +++ b/brian2/synapses/spikequeue.py @@ -116,6 +116,46 @@ def compress(self, delays, synapse_targets, n_synapses): if self._precompute_offsets: self.precompute_offsets(delays, synapse_targets, n_synapses) + def extract_spikes(self): + ''' + Get all the stored spikes + + Returns + ------- + spikes : ndarray + A 2d array with two columns, where each row describes a spike. + The first column gives the time (as integer time steps) and the + second column gives the index of the target synapse. + ''' + spikes = np.zeros((np.sum(self.n), 2)) + counter = 0 + for idx, n in enumerate(self.n): + t = (idx - self.currenttime) % len(self.n) + for target in self.X[idx, :n]: + spikes[counter, :] = np.array([t, target]) + counter += 1 + return spikes + + def store_spikes(self, spikes): + ''' + Store a list of spikes at the given positions after clearing all + spikes in the queue. + + Parameters + ---------- + spikes : ndarray + A 2d array with two columns, where each row describes a spike. + The first column gives the time (as integer time steps) and the + second column gives the index of the target synapse. 
+ + ''' + # Clear all spikes + self.n[:] = 0 + for t, target in spikes: + row_idx = (t + self.currenttime) % len(self.n) + self.X[row_idx, self.n[row_idx]] = target + self.n[row_idx] += 1 + ################################ SPIKE QUEUE DATASTRUCTURE ################ def next(self): ''' diff --git a/brian2/synapses/synapses.py b/brian2/synapses/synapses.py index f6f318fa1..3a1765807 100644 --- a/brian2/synapses/synapses.py +++ b/brian2/synapses/synapses.py @@ -1,1016 +1,1015 @@ -import collections -from collections import defaultdict -import weakref -import itertools -import re - -import numpy as np - -from brian2.core.base import BrianObject, Updater -from brian2.core.namespace import create_namespace -from brian2.core.preferences import brian_prefs -from brian2.core.variables import (ArrayVariable, DynamicArrayVariable, - Variable, Subexpression, AttributeVariable, - StochasticVariable) -from brian2.equations.equations import (Equations, SingleEquation, - DIFFERENTIAL_EQUATION, STATIC_EQUATION, - PARAMETER) -from brian2.groups.group import Group, GroupCodeRunner, create_runner_codeobj -from brian2.memory.dynamicarray import DynamicArray1D -from brian2.stateupdaters.base import StateUpdateMethod -from brian2.stateupdaters.exact import independent -from brian2.units.fundamentalunits import (Unit, Quantity, - fail_for_dimension_mismatch) -from brian2.units.allunits import second -from brian2.utils.logger import get_logger -from brian2.utils.stringtools import get_identifiers -from brian2.core.namespace import get_local_namespace - -from .spikequeue import SpikeQueue - -MAX_SYNAPSES = 2147483647 - -__all__ = ['Synapses'] - -logger = get_logger(__name__) - - -class StateUpdater(GroupCodeRunner): - ''' - The `GroupCodeRunner` that updates the state variables of a `Synapses` - at every timestep. - ''' - def __init__(self, group, method): - self.method_choice = method - GroupCodeRunner.__init__(self, group, - 'synaptic_stateupdate', - when=(group.clock, 'groups'), - name=group.name + '_stateupdater', - check_units=False) - - self.method = StateUpdateMethod.determine_stateupdater(self.group.equations, - self.group.variables, - method) - - def update_abstract_code(self): - - self.method = StateUpdateMethod.determine_stateupdater(self.group.equations, - self.group.variables, - self.method_choice) - - self.abstract_code = self.method(self.group.equations, - self.group.variables) - - -class LumpedUpdater(GroupCodeRunner): - ''' - The `GroupCodeRunner` that updates a value in the target group with the - sum over values in the `Synapses` object. - ''' - def __init__(self, varname, synapses, target): - - # Handling lumped variables using the standard mechanisms is not - # possible, we therefore also directly give the names of the arrays - # to the template. 
The dummy statement in the second line only serves - # the purpose of including the variable in the namespace - - code = ''' - _synaptic_var = {varname} - {varname}_post = {varname}_post - '''.format(varname=varname) - - template_kwds = {'_target_var_array': synapses.variables[varname+'_post'].arrayname} - - GroupCodeRunner.__init__(self, group=synapses, - template='lumped_variable', - code=code, - # We want to update the lumped variable before - # the target group gets updated - when=(target.clock, 'groups', -1), - name=target.name + '_lumped_variable_' + varname, - template_kwds=template_kwds) - - -class SynapticPathway(GroupCodeRunner, Group): - ''' - The `GroupCodeRunner` that applies the pre/post statement(s) to the state - variables of synapses where the pre-/postsynaptic group spiked in this - time step. - - Parameters - ---------- - - synapses : `Synapses` - Reference to the main `Synapses` object - prepost : {'pre', 'post'} - Whether this object should react to pre- or postsynaptic spikes - objname : str, optional - The name to use for the object, will be appendend to the name of - `synapses` to create a name in the sense of `Nameable`. The `synapses` - object should allow access to this object via - ``synapses.getattr(objname)``. It has to use the actual `objname` - attribute instead of relying on the provided argument, since the name - may have changed to become unique. If ``None`` is provided (the - default), ``prepost+'*'`` will be used (see `Nameable` for an - explanation of the wildcard operator). - ''' - def __init__(self, synapses, code, prepost, objname=None): - self.code = code - if prepost == 'pre': - self.source = synapses.source - self.target = synapses.target - self.synapse_indices = synapses.item_mapping.pre_synaptic - elif prepost == 'post': - self.source = synapses.target - self.target = synapses.source - self.synapse_indices = synapses.item_mapping.post_synaptic - else: - raise ValueError('prepost argument has to be either "pre" or ' - '"post"') - self.synapses = synapses - - if objname is None: - objname = prepost + '*' - - GroupCodeRunner.__init__(self, synapses, - 'synapses', - code=code, - when=(synapses.clock, 'synapses'), - name=synapses.name + '_' + objname) - - self._delays = DynamicArray1D(synapses.N, dtype=np.float64) - # Register the object with the `SynapticIndex` object so it gets - # automatically resized - synapses.item_mapping.register_variable(self._delays) - self.queue = SpikeQueue() - self.spiking_synapses = [] - self.variables = {'_spiking_synapses': AttributeVariable(Unit(1), - self, - 'spiking_synapses', - constant=False), - 'delay': DynamicArrayVariable('delay', second, - self._delays, - group_name=self.name, - constant=True)} - - - # Re-extract the last part of the name from the full name - self.objname = self.name[len(synapses.name) + 1:] - - #: The simulation dt (necessary for the delays) - self.dt = self.synapses.clock.dt_ - - self.item_mapping = synapses.item_mapping - - self.indices = self.synapses.indices - - # Enable access to the delay attribute via the specifier - Group.__init__(self) - - def update_abstract_code(self): - if self.synapses.event_driven is not None: - event_driven_update = independent(self.synapses.event_driven, - self.group.variables) - # TODO: Any way to do this more elegantly? 
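The extract_spikes()/store_spikes() pair added to SpikeQueue further up is meant for snapshotting and restoring the pending spikes of a queue. A hedged usage sketch (assuming a queue that has already been compressed and filled during a run; names match the diff, the surrounding setup is elided):

    def snapshot_and_restore(queue):
        # Each row of the snapshot is [time step, target synapse index].
        spikes = queue.extract_spikes()
        # ... the queue state could be serialized or rebuilt here ...
        queue.store_spikes(spikes)   # clears the queue, then re-inserts every spike
        return spikes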
- event_driven_update = re.sub(r'\bdt\b', '(t - lastupdate)', - event_driven_update) - - self.abstract_code = event_driven_update + '\n' - else: - self.abstract_code = '' - - self.abstract_code += self.code + '\n' - self.abstract_code += 'lastupdate = t\n' - - def before_run(self, namespace): - # Update the dt (might have changed between runs) - self.dt = self.synapses.clock.dt_ - GroupCodeRunner.before_run(self, namespace) - # we insert rather than replace because GroupCodeRunner puts a CodeObject in updaters already - self.updaters.insert(0, SynapticPathwayUpdater(self)) - self.queue.compress(np.round(self._delays[:] / self.dt).astype(np.int), - self.synapse_indices, len(self.synapses)) - - -class SynapticPathwayUpdater(Updater): - def run(self): - path = self.owner - # Push new spikes into the queue - spikes = path.source.spikes - offset = path.source.offset - if len(spikes): - # This check is necessary for subgroups - max_index = len(path.synapse_indices) + offset - indices = np.concatenate([path.synapse_indices[spike - offset] - for spike in spikes if - offset <= spike < max_index]).astype(np.int32) - if len(indices): - if len(path._delays) > 1: - delays = np.round(path._delays[indices] / path.dt).astype(int) - else: - delays = np.round(path._delays[:] / path.dt).astype(int) - path.queue.push(indices, delays) - # Get the spikes - path.spiking_synapses = path.queue.peek() - # Advance the spike queue - path.queue.next() - - -class IndexView(object): - - def __init__(self, indices, mapping): - self.index = indices - self.mapping = mapping - - def __getitem__(self, item): - synaptic_indices = self.index[self.mapping[item]] - return synaptic_indices - - -class SynapseIndexView(object): - - def __init__(self, indices): - self.index = indices - - def __getitem__(self, item): - pre = self.index.i[item] - post = self.index.j[item] - - return _synapse_numbers(pre, post) - - -def slice_to_test(x): - ''' - Returns a testing function corresponding to whether an index is in slice x. - x can also be an int. 
- ''' - if isinstance(x,int): - return lambda y: (y == x) - elif isinstance(x, slice): - if x == slice(None): - # No need for testing - return lambda y: np.repeat(True, len(y)) - start, stop, step = x.start, x.stop, x.step - - if start is None: - # No need to test for >= start - if step is None: - # Only have a stop value - return lambda y: (y < stop) - else: - # Stop and step - return lambda y: (y < stop) & ((y % step) == 0) - else: - # We need to test for >= start - if step is None: - if stop is None: - # Only a start value - return lambda y: (y >= start) - else: - # Start and stop - return lambda y: (y >= start) & (y < stop) - else: - if stop is None: - # Start and step value - return lambda y: (y >= start) & ((y-start)%step == 0) - else: - # Start, step and stop - return lambda y: (y >= start) & ((y-start)%step == 0) & (y < stop) - else: - raise TypeError('Expected int or slice, got {} instead'.format(type(x))) - - -def find_synapses(index, neuron_synaptic, synaptic_neuron): - if isinstance(index, (int, slice)): - test = slice_to_test(index) - found = test(synaptic_neuron[:]) - synapses = np.flatnonzero(found) - else: - synapses = [] - for neuron in index: - targets = neuron_synaptic[neuron] - synapses.extend(targets) - - return synapses - - -def _synapse_numbers(pre_neurons, post_neurons): - # Build an array of synapse numbers by counting the number of times - # a source/target combination exists - synapse_numbers = np.zeros_like(pre_neurons) - numbers = {} - for i, (source, target) in enumerate(zip(pre_neurons, - post_neurons)): - number = numbers.get((source, target), 0) - synapse_numbers[i] = number - numbers[(source, target)] = number + 1 - return synapse_numbers - - -class SynapticItemMapping(Variable): - ''' - Convenience object to store the synaptic indices. - - Parameters - ---------- - synapses : `Synapses` - Reference to the main `Synapses object` - ''' - def __init__(self, synapses): - Variable.__init__(self, Unit(1), value=self, constant=True) - self.source = synapses.source - self.target = synapses.target - source_len = len(synapses.source) - target_len = len(synapses.target) - self.synapses = weakref.proxy(synapses) - dtype = smallest_inttype(MAX_SYNAPSES) - self.synaptic_pre = DynamicArray1D(0, dtype=dtype) - self.synaptic_post = DynamicArray1D(0, dtype=dtype) - self.pre_synaptic = [DynamicArray1D(0, dtype=dtype) - for _ in xrange(source_len)] - self.post_synaptic = [DynamicArray1D(0, dtype=dtype) - for _ in xrange(target_len)] - - self._registered_variables = [] - - self.variables = {'i': DynamicArrayVariable('i', - Unit(1), - self.synaptic_pre), - 'j': DynamicArrayVariable('j', - Unit(1), - self.synaptic_post)} - self.i = IndexView(self.synaptic_pre, self) - self.j = IndexView(self.synaptic_post, self) - self.k = SynapseIndexView(self) - - N = property(fget=lambda self: len(self.synaptic_pre), - doc='Total number of synapses') - - def _resize(self, number): - if not isinstance(number, int): - raise TypeError(('Expected an integer number got {} ' - 'instead').format(type(number))) - if number < self.N: - raise ValueError(('Cannot reduce number of synapses, ' - '{} < {}').format(number, self.N)) - - self.synaptic_pre.resize(number) - self.synaptic_post.resize(number) - - for variable in self._registered_variables: - variable.resize(number) - - def register_variable(self, variable): - ''' - Register a `DynamicArray` to be automatically resized when the size of - the indices change. Called automatically when a `SynapticArrayVariable` - specifier is created. 
- ''' - if not hasattr(variable, 'resize'): - raise TypeError(('Variable of type {} does not have a resize ' - 'method, cannot register it with the synaptic ' - 'indices.').format(type(variable))) - self._registered_variables.append(weakref.proxy(variable)) - - def unregister_variable(self, variable): - ''' - Unregister a `DynamicArray` from the automatic resizing mechanism. - ''' - proxy_var = weakref.proxy(variable) - # The same variable might have been registered more than once - while proxy_var in self._registered_variables: - self._registered_variables.remove(proxy_var) - - def _add_synapses(self, sources, targets, n, p, condition=None, - level=0): - - if condition is None: - sources = np.atleast_1d(sources) - targets = np.atleast_1d(targets) - n = np.atleast_1d(n) - p = np.atleast_1d(p) - if not len(p) == 1 or p != 1: - use_connections = np.random.rand(len(sources)) < p - sources = sources[use_connections] - targets = targets[use_connections] - n = n[use_connections] - sources = sources.repeat(n) - targets = targets.repeat(n) - new_synapses = len(sources) - - old_N = self.N - new_N = old_N + new_synapses - self._resize(new_N) - - self.synaptic_pre[old_N:new_N] = sources - self.synaptic_post[old_N:new_N] = targets - synapse_idx = old_N - for source, target in zip(sources, targets): - synapses = self.pre_synaptic[source] - synapses.resize(len(synapses) + 1) - synapses[-1] = synapse_idx - synapses = self.post_synaptic[target] - synapses.resize(len(synapses) + 1) - synapses[-1] = synapse_idx - synapse_idx += 1 - else: - abstract_code = '_cond = ' + condition + '\n' - abstract_code += '_n = ' + str(n) + '\n' - abstract_code += '_p = ' + str(p) - namespace = get_local_namespace(level + 1) - additional_namespace = ('implicit-namespace', namespace) - variables = { - '_source_neurons': ArrayVariable('_source_neurons', Unit(1), - self.source.item_mapping[:] - - self.source.offset, - constant=True), - '_target_neurons': ArrayVariable('_target_neurons', Unit(1), - self.target.item_mapping[:] - - self.target.offset, - constant=True), - # Will be set in the template - 'i': Variable(unit=Unit(1), constant=True), - 'j': Variable(unit=Unit(1), constant=True) - } - codeobj = create_runner_codeobj(self.synapses, - abstract_code, - 'synapses_create', - additional_variables=variables, - additional_namespace=additional_namespace, - check_units=False - ) - codeobj() - number = len(self.synaptic_pre) - for variable in self._registered_variables: - variable.resize(number) - - def __len__(self): - return self.N - - def __getitem__(self, index): - ''' - Returns synaptic indices for `index`, which can be a tuple of indices - (including arrays and slices), a single index or a string. - - ''' - if (not isinstance(index, (tuple, basestring)) and - isinstance(index, (int, np.ndarray, slice, - collections.Sequence))): - index = (index, slice(None), slice(None)) - if isinstance(index, tuple): - if len(index) == 2: # two indices (pre- and postsynaptic cell) - index = (index[0], index[1], slice(None)) - elif len(index) > 3: - raise IndexError('Need 1, 2 or 3 indices, got %d.' 
% len(index)) - - I, J, K = index - - pre_synapses = find_synapses(I, self.pre_synaptic, - self.synaptic_pre) - post_synapses = find_synapses(J, self.post_synaptic, - self.synaptic_post) - matching_synapses = np.intersect1d(pre_synapses, post_synapses, - assume_unique=True) - - if K == slice(None): - return matching_synapses - elif isinstance(K, (int, slice)): - test_k = slice_to_test(K) - else: - raise NotImplementedError(('Indexing synapses with arrays not' - 'implemented yet')) - - pre_neurons = self.synaptic_pre[pre_synapses] - post_neurons = self.synaptic_post[post_synapses] - synapse_numbers = _synapse_numbers(pre_neurons, - post_neurons) - return np.intersect1d(matching_synapses, - np.flatnonzero(test_k(synapse_numbers)), - assume_unique=True) - - elif isinstance(index, basestring): - # interpret the string expression - identifiers = get_identifiers(index) - variables = dict(self.variables) - if 'k' in identifiers: - synapse_numbers = _synapse_numbers(self.synaptic_pre[:], - self.synaptic_post[:]) - variables['k'] = ArrayVariable('k', Unit(1), - synapse_numbers) - namespace = get_local_namespace(1) - additional_namespace = ('implicit-namespace', namespace) - abstract_code = '_cond = ' + index - template = getattr(self.synapses, '_index_with_code_template', - 'state_variable_indexing') - codeobj = create_runner_codeobj(self.synapses, - abstract_code, - template, - additional_variables=variables, - additional_namespace=additional_namespace, - ) - - result = codeobj() - return result - else: - raise IndexError('Unsupported index type {itype}'.format(itype=type(index))) - - -class Synapses(BrianObject, Group): - ''' - Class representing synaptic connections. Creating a new `Synapses` object - does by default not create any synapses -- you either have to provide - the `connect` argument or call the `Synapses.connect` method for that. - - Parameters - ---------- - - source : `SpikeSource` - The source of spikes, e.g. a `NeuronGroup`. - target : `Group`, optional - The target of the spikes, typically a `NeuronGroup`. If none is given, - the same as `source` - model : {`str`, `Equations`}, optional - The model equations for the synapses. - pre : {str, dict}, optional - The code that will be executed after every pre-synaptic spike. Can be - either a single (possibly multi-line) string, or a dictionary mapping - pathway names to code strings. In the first case, the pathway will be - called ``pre`` and made available as an attribute of the same name. - In the latter case, the given names will be used as the - pathway/attribute names. Each pathway has its own code and its own - delays. - post : {str, dict}, optional - The code that will be executed after every post-synaptic spike. Same - conventions as for `pre`, the default name for the pathway is ``post``. - connect : {str, bool}. optional - Determines whether any actual synapses are created. ``False`` (the - default) means not to create any synapses, ``True`` means to create - synapses between all source/target pairs. Also accepts a string - expression that evaluates to ``True`` for every synapse that should - be created, e.g. ``'i == j'`` for a one-to-one connectivity. See - `Synapses.connect` for more details. - delay : {`Quantity`, dict}, optional - The delay for the "pre" pathway (same for all synapses) or a dictionary - mapping pathway names to delays. If a delay is specified in this way - for a pathway, it is stored as a single scalar value. It can still - be changed afterwards, but only to a single scalar value. 
If you want - to have delays that vary across synapses, do not use the keyword - argument, but instead set the delays via the attribute of the pathway, - e.g. ``S.pre.delay = ...`` (or ``S.delay = ...`` as an abbreviation), - ``S.post.delay = ...``, etc. - namespace : dict, optional - A dictionary mapping identifier names to objects. If not given, the - namespace will be filled in at the time of the call of `Network.run`, - with either the values from the ``network`` argument of the - `Network.run` method or from the local context, if no such argument is - given. - dtype : `dtype`, optional - The standard datatype for all state variables. Defaults to - `core.default_scalar_type`. - codeobj_class : class, optional - The `CodeObject` class to use to run code. - clock : `Clock`, optional - The clock to use. - method : {str, `StateUpdateMethod`}, optional - The numerical integration method to use. If none is given, an - appropriate one is automatically determined. - name : str, optional - The name for this object. If none is given, a unique name of the form - ``synapses``, ``synapses_1``, etc. will be automatically chosen. - ''' - def __init__(self, source, target=None, model=None, pre=None, post=None, - connect=False, delay=None, namespace=None, dtype=None, - codeobj_class=None, - clock=None, method=None, name='synapses*'): - - BrianObject.__init__(self, when=clock, name=name) - - self.codeobj_class = codeobj_class - - self.source = weakref.proxy(source) - if target is None: - self.target = self.source - else: - self.target = weakref.proxy(target) - - ##### Prepare and validate equations - if model is None: - model = '' - - if isinstance(model, basestring): - model = Equations(model) - if not isinstance(model, Equations): - raise TypeError(('model has to be a string or an Equations ' - 'object, is "%s" instead.') % type(model)) - - # Check flags - model.check_flags({DIFFERENTIAL_EQUATION: ['event-driven', 'lumped'], - STATIC_EQUATION: ['lumped'], - PARAMETER: ['constant', 'lumped']}) - - # Separate the equations into event-driven and continuously updated - # equations - event_driven = [] - continuous = [] - for single_equation in model.itervalues(): - if 'event-driven' in single_equation.flags: - if 'lumped' in single_equation.flags: - raise ValueError(('Event-driven variable %s cannot be ' - 'a lumped variable.') % single_equation.varname) - event_driven.append(single_equation) - else: - continuous.append(single_equation) - # Add the lastupdate variable, used by event-driven equations - continuous.append(SingleEquation(PARAMETER, 'lastupdate', second)) - - if len(event_driven): - self.event_driven = Equations(event_driven) - else: - self.event_driven = None - - self.equations = Equations(continuous) - - ##### Setup the memory - self.arrays = self._allocate_memory(dtype=dtype) - - # Setup the namespace - self._given_namespace = namespace - self.namespace = create_namespace(namespace) - - self._queues = {} - self._delays = {} - - self.item_mapping = SynapticItemMapping(self) - self.indices = {'_idx': self.item_mapping, - '_presynaptic_idx': self.item_mapping.synaptic_pre, - '_postsynaptic_idx': self.item_mapping.synaptic_post} - # Allow S.i instead of S.indices.i, etc. 
- self.i = self.item_mapping.i - self.j = self.item_mapping.j - self.k = self.item_mapping.k - - # Make use of a special template when setting/indexing variables with - # code in order to allow references to pre- and postsynaptic variables - self._set_with_code_template = 'synaptic_variable_set' - self._index_with_code_template = 'synaptic_variable_indexing' - - # Setup variables - self.variables = self._create_variables() - - #: List of names of all updaters, e.g. ['pre', 'post'] - self._synaptic_updaters = [] - for prepost, argument in zip(('pre', 'post'), (pre, post)): - if not argument: - continue - if isinstance(argument, basestring): - self._add_updater(argument, prepost) - elif isinstance(argument, collections.Mapping): - for key, value in argument.iteritems(): - if not isinstance(key, basestring): - err_msg = ('Keys for the "{}" argument' - 'have to be strings, got ' - '{} instead.').format(prepost, type(key)) - raise TypeError(err_msg) - self._add_updater(value, prepost, objname=key) - - # If we have a pathway called "pre" (the most common use case), provide - # direct access to its delay via a delay attribute (instead of having - # to use pre.delay) - if 'pre' in self._synaptic_updaters: - self.variables['delay'] = self.pre.variables['delay'] - - if delay is not None: - if isinstance(delay, Quantity): - if not 'pre' in self._synaptic_updaters: - raise ValueError(('Cannot set delay, no "pre" pathway exists.' - 'Use a dictionary if you want to set the ' - 'delay for a pathway with a different name.')) - delay = {'pre': delay} - - if not isinstance(delay, collections.Mapping): - raise TypeError('Delay argument has to be a quantity or a ' - 'dictionary, is type %s instead.' % type(delay)) - for pathway, pathway_delay in delay.iteritems(): - if not pathway in self._synaptic_updaters: - raise ValueError(('Cannot set the delay for pathway ' - '"%s": unknown pathway.') % pathway) - if not isinstance(pathway_delay, Quantity): - raise TypeError(('Cannot set the delay for pathway "%s": ' - 'expected a quantity, got %s instead.') % (pathway, - type(pathway_delay))) - if pathway_delay.size != 1: - raise TypeError(('Cannot set the delay for pathway "%s": ' - 'expected a scalar quantity, got a ' - 'quantity with shape %s instead.') % str(pathway_delay.shape)) - fail_for_dimension_mismatch(pathway_delay, second, ('Delay has to be ' - 'specified in units ' - 'of seconds')) - updater = getattr(self, pathway) - self.item_mapping.unregister_variable(updater._delays) - del updater._delays - # For simplicity, store the delay as a one-element array - # so that for example updater._delays[:] works. 
- updater._delays = np.array([float(pathway_delay)]) - variable = ArrayVariable('delay', second, updater._delays, - group_name=self.name, scalar=True) - updater.variables['delay'] = variable - if pathway == 'pre': - self.variables['delay'] = variable - - #: Performs numerical integration step - self.state_updater = StateUpdater(self, method) - self.contained_objects.append(self.state_updater) - - #: "Lumped variable" mechanism -- sum over all synapses of a - #: postsynaptic target - self.lumped_updaters = {} - for single_equation in self.equations.itervalues(): - if 'lumped' in single_equation.flags: - varname = single_equation.varname - # For a lumped variable, we need an equivalent parameter in the - # target group - if not varname in self.target.variables: - raise ValueError(('The lumped variable %s needs a variable ' - 'of the same name in the target ' - 'group ') % single_equation.varname) - fail_for_dimension_mismatch(self.variables[varname].unit, - self.target.variables[varname].unit, - ('Lumped variables need to have ' - 'the same units in Synapses ' - 'and the target group')) - # TODO: Add some more stringent check about the type of - # variable in the target group - updater = LumpedUpdater(varname, self, self.target) - self.lumped_updaters[varname] = updater - self.contained_objects.append(updater) - - # Do an initial connect, if requested - if not isinstance(connect, (bool, basestring)): - raise TypeError(('"connect" keyword has to be a boolean value or a ' - 'string, is type %s instead.' % type(connect))) - self._initial_connect = connect - if not connect is False: - self.connect(connect, level=1) - - # Activate name attribute access - Group.__init__(self) - - N = property(fget=lambda self: self.item_mapping.N, - doc='Total number of synapses') - - def __len__(self): - return self.N - - def before_run(self, namespace): - self.lastupdate = self.clock.t - super(Synapses, self).before_run(namespace) - - def _add_updater(self, code, prepost, objname=None): - ''' - Add a new target updater. Users should call `add_pre` or `add_post` - instead. - - Parameters - ---------- - code : str - The abstract code that should be executed on pre-/postsynaptic - spikes. - prepost : {'pre', 'post'} - Whether the code is triggered by presynaptic or postsynaptic spikes - objname : str, optional - A name for the object, see `SynapticPathway` for more details. - - Returns - ------- - objname : str - The final name for the object. Equals `objname` if it was explicitly - given (and did not end in a wildcard character). - - ''' - if prepost == 'pre': - spike_group, group_name = self.source, 'Source' - elif prepost == 'post': - spike_group = self.target, 'Target' - else: - raise ValueError(('"prepost" argument has to be "pre" or "post", ' - 'is "%s".') % prepost) - - if not hasattr(spike_group, 'spikes') and hasattr(spike_group, 'clock'): - raise TypeError(('%s has to be a SpikeSource with spikes and' - ' clock attribute. 
Is type %r instead') - % (group_name, type(spike_group))) - - updater = SynapticPathway(self, code, prepost, objname) - objname = updater.objname - if hasattr(self, objname): - raise ValueError(('Cannot add updater with name "{name}", synapses ' - 'object already has an attribute with this ' - 'name.').format(name=objname)) - - setattr(self, objname, updater) - self._synaptic_updaters.append(objname) - self.contained_objects.append(updater) - return objname - - def _create_variables(self): - ''' - Create the variables dictionary for this `Synapses`, containing - entries for the equation variables and some standard entries. - ''' - # Add all the pre and post variables with _pre and _post suffixes - v = {} - self.variable_indices = defaultdict(lambda: '_idx') - for name, var in getattr(self.source, 'variables', {}).iteritems(): - if isinstance(var, (ArrayVariable, Subexpression)): - v[name + '_pre'] = var - self.variable_indices[name + '_pre'] = '_presynaptic_idx' - for name, var in getattr(self.target, 'variables', {}).iteritems(): - if isinstance(var, (ArrayVariable, Subexpression)): - v[name + '_post'] = var - self.variable_indices[name + '_post'] = '_postsynaptic_idx' - # Also add all the post variables without a suffix -- if this - # clashes with the name of a state variable defined in this - # Synapses group, the latter will overwrite the entry later and - # take precedence - v[name] = var - self.variable_indices[name] = '_postsynaptic_idx' - - # Standard variables always present - v.update({'t': AttributeVariable(second, self.clock, 't_', - constant=False), - 'dt': AttributeVariable(second, self.clock, 'dt_', - constant=True), - '_num_source_neurons': Variable(Unit(1), len(self.source), - constant=True), - '_num_target_neurons': Variable(Unit(1), len(self.target), - constant=True), - '_source_offset': Variable(Unit(1), self.source.offset, - constant=True), - '_target_offset': Variable(Unit(1), self.target.offset, - constant=True), - '_synaptic_pre': DynamicArrayVariable('_synaptic_pre', - Unit(1), - self.item_mapping.synaptic_pre), - '_synaptic_post': DynamicArrayVariable('_synaptic_pre', - Unit(1), - self.item_mapping.synaptic_post), - # We don't need "proper" specifier for these -- they go - # back to Python code currently - '_pre_synaptic': Variable(Unit(1), self.item_mapping.pre_synaptic), - '_post_synaptic': Variable(Unit(1), - self.item_mapping.post_synaptic)}) - - for eq in itertools.chain(self.equations.itervalues(), - self.event_driven.itervalues() - if self.event_driven is not None else []): - if eq.type in (DIFFERENTIAL_EQUATION, PARAMETER): - array = self.arrays[eq.varname] - constant = ('constant' in eq.flags) - # We are dealing with dynamic arrays here, code generation - # shouldn't directly access the specifier.array attribute but - # use specifier.get_value() to get a reference to the underlying - # array - v[eq.varname] = DynamicArrayVariable(eq.varname, - eq.unit, - array, - group_name=self.name, - constant=constant, - is_bool=eq.is_bool) - if eq.varname in self.variable_indices: - # we are overwriting a postsynaptic variable of the same - # name, delete the reference to the postsynaptic index - del self.variable_indices[eq.varname] - # Register the array with the `SynapticItemMapping` object so - # it gets automatically resized - self.item_mapping.register_variable(array) - elif eq.type == STATIC_EQUATION: - v.update({eq.varname: Subexpression(eq.unit, - brian_prefs['core.default_scalar_dtype'], - str(eq.expr), - variables=v, - namespace=self.namespace, - 
is_bool=eq.is_bool)}) - else: - raise AssertionError('Unknown type of equation: ' + eq.eq_type) - - # Stochastic variables - for xi in self.equations.stochastic_variables: - v.update({xi: StochasticVariable()}) - - return v - - def _allocate_memory(self, dtype=None): - # Allocate memory (TODO: this should be refactored somewhere at some point) - arrayvarnames = set(eq.varname for eq in self.equations.itervalues() if - eq.type in (DIFFERENTIAL_EQUATION, - PARAMETER)) - if self.event_driven is not None: - # Only differential equations are event-driven - arrayvarnames |= set(eq.varname - for eq in self.event_driven.itervalues()) - - arrays = {} - for name in arrayvarnames: - if isinstance(dtype, dict): - curdtype = dtype[name] - else: - curdtype = dtype - if curdtype is None: - curdtype = brian_prefs['core.default_scalar_dtype'] - arrays[name] = DynamicArray1D(0) - logger.debug("NeuronGroup memory allocated successfully.") - return arrays - - def connect(self, pre_or_cond, post=None, p=1., n=1, level=0): - ''' - Add synapses. The first argument can be either a presynaptic index - (int or array) or a condition for synapse creation in the form of a - string that evaluates to a boolean value (or directly a boolean value). - If it is given as an index, also `post` has to be present. A string - condition will be evaluated for all pre-/postsynaptic indices, which - can be referred to as `i` and `j`. - - Parameters - ---------- - pre_or_cond : {int, ndarray of int, bool, str} - The presynaptic neurons (in the form of an index or an array of - indices) or a boolean value or a string that evaluates to a - boolean value. If it is an index, then also `post` has to be - given. - post_neurons : {int, ndarray of int), optional - GroupIndices of neurons from the target group. Non-optional if one or - more presynaptic indices have been given. - p : float, optional - The probability to create `n` synapses wherever the condition - given as `pre_or_cond` evaluates to true or for the given - pre/post indices. - n : int, optional - The number of synapses to create per pre/post connection pair. - Defaults to 1. - - Examples - -------- - >>> from brian2 import * - >>> import numpy as np - >>> G = NeuronGroup(10, 'dv/dt = -v / tau : 1', threshold='v>1', reset='v=0') - >>> S = Synapses(G, G, 'w:1', pre='v+=w') - >>> S.connect('i != j') # all-to-all but no self-connections - >>> S.connect(0, 0) # connect neuron 0 to itself - >>> S.connect(np.array([1, 2]), np.array([2, 1])) # connect 1->2 and 2->1 - >>> S.connect(True) # connect all-to-all - >>> S.connect('i != j', p=0.1) # Connect neurons with 10% probability, exclude self-connections - >>> S.connect('i == j', n=2) # Connect all neurons to themselves with 2 synapses - ''' - if not isinstance(pre_or_cond, (bool, basestring)): - pre_or_cond = np.asarray(pre_or_cond) - if not np.issubdtype(pre_or_cond.dtype, np.int): - raise TypeError(('Presynaptic indices have to be given as ' - 'integers, are type %s instead.') % pre_or_cond.dtype) - - post = np.asarray(post) - if not np.issubdtype(post.dtype, np.int): - raise TypeError(('Presynaptic indices can only be combined ' - 'with postsynaptic integer indices))')) - if isinstance(n, basestring): - raise TypeError(('Indices cannot be combined with a string' - 'expression for n. 
Either use an array/scalar ' - 'for n, or a string expression for the ' - 'connections')) - i, j, n = np.broadcast_arrays(pre_or_cond, post, n) - if i.ndim > 1: - raise ValueError('Can only use 1-dimensional indices') - self.item_mapping._add_synapses(i, j, n, p, level=level+1) - elif isinstance(pre_or_cond, (basestring, bool)): - if pre_or_cond is False: - return # nothing to do... - elif pre_or_cond is True: - # TODO: This should not be handled with the general mechanism - pre_or_cond = 'True' - if post is not None: - raise ValueError('Cannot give a postsynaptic index when ' - 'using a string expression') - if not isinstance(n, (int, basestring)): - raise TypeError('n has to be an integer or a string evaluating ' - 'to an integer, is type %s instead.' % type(n)) - if not isinstance(p, (float, basestring)): - raise TypeError('p has to be a float or a string evaluating ' - 'to an float, is type %s instead.' % type(n)) - self.item_mapping._add_synapses(None, None, n, p, condition=pre_or_cond, - level=level+1) - else: - raise TypeError(('First argument has to be an index or a ' - 'string, is %s instead.') % type(pre_or_cond)) - - -def smallest_inttype(N): - ''' - Returns the smallest signed integer dtype that can store N indexes. - ''' - if N<=127: - return np.int8 - elif N<=32727: - return np.int16 - elif N<=2147483647: - return np.int32 - else: - return np.int64 +import collections +from collections import defaultdict +import weakref +import itertools +import re + +import numpy as np + +from brian2.core.base import BrianObject, Updater +from brian2.core.namespace import create_namespace +from brian2.core.preferences import brian_prefs +from brian2.core.variables import (ArrayVariable, DynamicArrayVariable, + Variable, Subexpression, AttributeVariable, + StochasticVariable) +from brian2.devices.device import get_device +from brian2.equations.equations import (Equations, SingleEquation, + DIFFERENTIAL_EQUATION, STATIC_EQUATION, + PARAMETER) +from brian2.groups.group import Group, GroupCodeRunner, create_runner_codeobj +from brian2.memory.dynamicarray import DynamicArray1D +from brian2.stateupdaters.base import StateUpdateMethod +from brian2.stateupdaters.exact import independent +from brian2.units.fundamentalunits import (Unit, Quantity, + fail_for_dimension_mismatch) +from brian2.units.allunits import second +from brian2.utils.logger import get_logger +from brian2.utils.stringtools import get_identifiers +from brian2.core.namespace import get_local_namespace + +from .spikequeue import SpikeQueue + +MAX_SYNAPSES = 2147483647 + +__all__ = ['Synapses'] + +logger = get_logger(__name__) + + +class StateUpdater(GroupCodeRunner): + ''' + The `GroupCodeRunner` that updates the state variables of a `Synapses` + at every timestep. + ''' + def __init__(self, group, method): + self.method_choice = method + GroupCodeRunner.__init__(self, group, + 'synaptic_stateupdate', + when=(group.clock, 'groups'), + name=group.name + '_stateupdater', + check_units=False) + + self.method = StateUpdateMethod.determine_stateupdater(self.group.equations, + self.group.variables, + method) + + def update_abstract_code(self): + + self.method = StateUpdateMethod.determine_stateupdater(self.group.equations, + self.group.variables, + self.method_choice) + + self.abstract_code = self.method(self.group.equations, + self.group.variables) + + +class LumpedUpdater(GroupCodeRunner): + ''' + The `GroupCodeRunner` that updates a value in the target group with the + sum over values in the `Synapses` object. 
+ ''' + def __init__(self, varname, synapses, target): + + # Handling lumped variables using the standard mechanisms is not + # possible, we therefore also directly give the names of the arrays + # to the template. The dummy statement in the second line only serves + # the purpose of including the variable in the namespace + + code = ''' + _synaptic_var = {varname} + {varname}_post = {varname}_post + '''.format(varname=varname) + + template_kwds = {'_target_var_array': synapses.variables[varname+'_post'].arrayname} + + GroupCodeRunner.__init__(self, group=synapses, + template='lumped_variable', + code=code, + # We want to update the lumped variable before + # the target group gets updated + when=(target.clock, 'groups', -1), + name=target.name + '_lumped_variable_' + varname, + template_kwds=template_kwds) + + +class SynapticPathway(GroupCodeRunner, Group): + ''' + The `GroupCodeRunner` that applies the pre/post statement(s) to the state + variables of synapses where the pre-/postsynaptic group spiked in this + time step. + + Parameters + ---------- + + synapses : `Synapses` + Reference to the main `Synapses` object + prepost : {'pre', 'post'} + Whether this object should react to pre- or postsynaptic spikes + objname : str, optional + The name to use for the object, will be appendend to the name of + `synapses` to create a name in the sense of `Nameable`. The `synapses` + object should allow access to this object via + ``synapses.getattr(objname)``. It has to use the actual `objname` + attribute instead of relying on the provided argument, since the name + may have changed to become unique. If ``None`` is provided (the + default), ``prepost+'*'`` will be used (see `Nameable` for an + explanation of the wildcard operator). + ''' + def __init__(self, synapses, code, prepost, objname=None): + self.code = code + if prepost == 'pre': + self.source = synapses.source + self.target = synapses.target + self.synapse_indices = synapses.item_mapping.pre_synaptic + elif prepost == 'post': + self.source = synapses.target + self.target = synapses.source + self.synapse_indices = synapses.item_mapping.post_synaptic + else: + raise ValueError('prepost argument has to be either "pre" or ' + '"post"') + self.synapses = synapses + + if objname is None: + objname = prepost + '*' + + GroupCodeRunner.__init__(self, synapses, + 'synapses', + code=code, + when=(synapses.clock, 'synapses'), + name=synapses.name + '_' + objname) + + self.queue = SpikeQueue() + self.spiking_synapses = [] + self.variables = {'_spiking_synapses': AttributeVariable(Unit(1), + self, + 'spiking_synapses', + constant=False), + 'delay': get_device().dynamic_array_1d(self, 'delay', + synapses.N, + second, + constant=True)} + self._delays = self.variables['delay'] + # Register the object with the `SynapticIndex` object so it gets + # automatically resized + synapses.item_mapping.register_variable(self._delays) + + # Re-extract the last part of the name from the full name + self.objname = self.name[len(synapses.name) + 1:] + + #: The simulation dt (necessary for the delays) + self.dt = self.synapses.clock.dt_ + + self.item_mapping = synapses.item_mapping + + self.indices = self.synapses.indices + + # Enable access to the delay attribute via the specifier + self._enable_group_attributes() + + def update_abstract_code(self): + if self.synapses.event_driven is not None: + event_driven_update = independent(self.synapses.event_driven, + self.group.variables) + # TODO: Any way to do this more elegantly? 
+ event_driven_update = re.sub(r'\bdt\b', '(t - lastupdate)', + event_driven_update) + + self.abstract_code = event_driven_update + '\n' + else: + self.abstract_code = '' + + self.abstract_code += self.code + '\n' + self.abstract_code += 'lastupdate = t\n' + + def before_run(self, namespace): + # Get the existing spikes in the queue + spikes = self.queue.extract_spikes() + # Convert the integer time steps into floating point time + spikes[:, 0] *= self.dt + # Update the dt (might have changed between runs) + self.dt = self.synapses.clock.dt_ + self.queue.compress(np.round(self._delays.get_value() / self.dt).astype(np.int), + self.synapse_indices, len(self.synapses)) + # Convert the floating point time back to integer time (dt might have changed) + spikes[:, 0] = np.round(spikes[:, 0] / self.dt) + # Re-insert the spikes into the queue + self.queue.store_spikes(spikes) + + GroupCodeRunner.before_run(self, namespace) + # we insert rather than replace because GroupCodeRunner puts a CodeObject in updaters already + self.pushspikes_codeobj = get_device().code_object(self, + self.name+'_push_spikes_codeobject*', + '', + {}, + self.group.variables, + 'synapses_push_spikes', + self.group.indices, + self.group.variable_indices, + ) + self.updaters.insert(0, self.pushspikes_codeobj.get_updater()) + #self.updaters.insert(0, SynapticPathwayUpdater(self)) + + def push_spikes(self): + # Push new spikes into the queue + spikes = self.source.spikes + offset = self.source.offset + if len(spikes): + # This check is necessary for subgroups + max_index = len(self.synapse_indices) + offset + indices = np.concatenate([self.synapse_indices[spike - offset] + for spike in spikes if + offset <= spike < max_index]).astype(np.int32) + + if len(indices): + if len(self._delays) > 1: + delays = np.round(self._delays[indices] / self.dt).astype(int) + else: + delays = np.round(self._delays.get_value() / self.dt).astype(int) + self.queue.push(indices, delays) + # Get the spikes + self.spiking_synapses = self.queue.peek() + # Advance the spike queue + self.queue.next() + + +class IndexView(object): + + def __init__(self, indices, mapping): + self.index = indices + self.mapping = mapping + + def __getitem__(self, item): + try: + synaptic_indices = self.index[self.mapping.get_value()[item]] + except TypeError as ex: + print 'index', self.index + raise ex + return synaptic_indices + + +class SynapseIndexView(object): + + def __init__(self, indices): + self.index = indices + + def __getitem__(self, item): + pre = self.index.i[item] + post = self.index.j[item] + + return _synapse_numbers(pre, post) + + +def slice_to_test(x): + ''' + Returns a testing function corresponding to whether an index is in slice x. + x can also be an int. 
+ ''' + if isinstance(x,int): + return lambda y: (y == x) + elif isinstance(x, slice): + if x == slice(None): + # No need for testing + return lambda y: np.repeat(True, len(y)) + start, stop, step = x.start, x.stop, x.step + + if start is None: + # No need to test for >= start + if step is None: + # Only have a stop value + return lambda y: (y < stop) + else: + # Stop and step + return lambda y: (y < stop) & ((y % step) == 0) + else: + # We need to test for >= start + if step is None: + if stop is None: + # Only a start value + return lambda y: (y >= start) + else: + # Start and stop + return lambda y: (y >= start) & (y < stop) + else: + if stop is None: + # Start and step value + return lambda y: (y >= start) & ((y-start)%step == 0) + else: + # Start, step and stop + return lambda y: (y >= start) & ((y-start)%step == 0) & (y < stop) + else: + raise TypeError('Expected int or slice, got {} instead'.format(type(x))) + + +def find_synapses(index, neuron_synaptic, synaptic_neuron): + if isinstance(index, (int, slice)): + test = slice_to_test(index) + found = test(synaptic_neuron[:]) + synapses = np.flatnonzero(found) + else: + synapses = [] + for neuron in index: + targets = neuron_synaptic[neuron] + synapses.extend(targets) + + return synapses + + +def _synapse_numbers(pre_neurons, post_neurons): + # Build an array of synapse numbers by counting the number of times + # a source/target combination exists + synapse_numbers = np.zeros_like(pre_neurons) + numbers = {} + for i, (source, target) in enumerate(zip(pre_neurons, + post_neurons)): + number = numbers.get((source, target), 0) + synapse_numbers[i] = number + numbers[(source, target)] = number + 1 + return synapse_numbers + + +class SynapticItemMapping(Variable): + ''' + Convenience object to store the synaptic indices. + + Parameters + ---------- + synapses : `Synapses` + Reference to the main `Synapses object` + ''' + def __init__(self, synapses): + Variable.__init__(self, Unit(1), value=self, constant=True) + self.source = synapses.source + self.target = synapses.target + self.synapses = weakref.proxy(synapses) + self.synaptic_pre = self.synapses.variables['_synaptic_pre'] + self.synaptic_post = self.synapses.variables['_synaptic_post'] + self.pre_synaptic = self.synapses.pre_synaptic + self.post_synaptic = self.synapses.post_synaptic + + self._registered_variables = [] + + self.variables = {'i': self.synapses.variables['_synaptic_pre'], + 'j': self.synapses.variables['_synaptic_post']} + self.i = IndexView(self.variables['i'], self) + self.j = IndexView(self.variables['j'], self) + self.k = SynapseIndexView(self) + + N = property(fget=lambda self: len(self.variables['i']), + doc='Total number of synapses') + + def _resize(self, number): + if not isinstance(number, int): + raise TypeError(('Expected an integer number got {} ' + 'instead').format(type(number))) + if number < self.N: + raise ValueError(('Cannot reduce number of synapses, ' + '{} < {}').format(number, self.N)) + + self.variables['i'].resize(number) + self.variables['j'].resize(number) + + for variable in self._registered_variables: + variable.resize(number) + + def register_variable(self, variable): + ''' + Register a `DynamicArray` to be automatically resized when the size of + the indices change. Called automatically when a `SynapticArrayVariable` + specifier is created. 
+ ''' + if not hasattr(variable, 'resize'): + raise TypeError(('Variable of type {} does not have a resize ' + 'method, cannot register it with the synaptic ' + 'indices.').format(type(variable))) + self._registered_variables.append(weakref.proxy(variable)) + + def unregister_variable(self, variable): + ''' + Unregister a `DynamicArray` from the automatic resizing mechanism. + ''' + proxy_var = weakref.proxy(variable) + # The same variable might have been registered more than once + while proxy_var in self._registered_variables: + self._registered_variables.remove(proxy_var) + + def _add_synapses(self, sources, targets, n, p, condition=None, + level=0): + + if condition is None: + sources = np.atleast_1d(sources) + targets = np.atleast_1d(targets) + n = np.atleast_1d(n) + p = np.atleast_1d(p) + if not len(p) == 1 or p != 1: + use_connections = np.random.rand(len(sources)) < p + sources = sources[use_connections] + targets = targets[use_connections] + n = n[use_connections] + sources = sources.repeat(n) + targets = targets.repeat(n) + new_synapses = len(sources) + + old_N = self.N + new_N = old_N + new_synapses + self._resize(new_N) + + self.synaptic_pre[old_N:new_N] = sources + self.synaptic_post[old_N:new_N] = targets + synapse_idx = old_N + for source, target in zip(sources, targets): + synapses = self.pre_synaptic[source] + synapses.resize(len(synapses) + 1) + synapses[-1] = synapse_idx + synapses = self.post_synaptic[target] + synapses.resize(len(synapses) + 1) + synapses[-1] = synapse_idx + synapse_idx += 1 + else: + abstract_code = '_cond = ' + condition + '\n' + abstract_code += '_n = ' + str(n) + '\n' + abstract_code += '_p = ' + str(p) + namespace = get_local_namespace(level + 1) + additional_namespace = ('implicit-namespace', namespace) + variables = { + '_source_neurons': ArrayVariable('_source_neurons', Unit(1), + self.source.item_mapping[:] - + self.source.offset, + constant=True), + '_target_neurons': ArrayVariable('_target_neurons', Unit(1), + self.target.item_mapping[:] - + self.target.offset, + constant=True), + # Will be set in the template + 'i': Variable(unit=Unit(1), constant=True), + 'j': Variable(unit=Unit(1), constant=True) + } + codeobj = create_runner_codeobj(self.synapses, + abstract_code, + 'synapses_create', + additional_variables=variables, + additional_namespace=additional_namespace, + check_units=False + ) + codeobj() + number = len(self.variables['i']) + for variable in self._registered_variables: + variable.resize(number) + + def __len__(self): + return self.N + + def __getitem__(self, index): + ''' + Returns synaptic indices for `index`, which can be a tuple of indices + (including arrays and slices), a single index or a string. + + ''' + if (not isinstance(index, (tuple, basestring)) and + isinstance(index, (int, np.ndarray, slice, + collections.Sequence))): + index = (index, slice(None), slice(None)) + if isinstance(index, tuple): + if len(index) == 2: # two indices (pre- and postsynaptic cell) + index = (index[0], index[1], slice(None)) + elif len(index) > 3: + raise IndexError('Need 1, 2 or 3 indices, got %d.' 
% len(index)) + + I, J, K = index + + pre_synapses = find_synapses(I, self.pre_synaptic, + self.variables['i'].get_value()) + post_synapses = find_synapses(J, self.post_synaptic, + self.variables['j'].get_value()) + matching_synapses = np.intersect1d(pre_synapses, post_synapses, + assume_unique=True) + + if K == slice(None): + return matching_synapses + elif isinstance(K, (int, slice)): + test_k = slice_to_test(K) + else: + raise NotImplementedError(('Indexing synapses with arrays not' + 'implemented yet')) + + pre_neurons = self.synaptic_pre[pre_synapses] + post_neurons = self.synaptic_post[post_synapses] + synapse_numbers = _synapse_numbers(pre_neurons, + post_neurons) + return np.intersect1d(matching_synapses, + np.flatnonzero(test_k(synapse_numbers)), + assume_unique=True) + + elif isinstance(index, basestring): + # interpret the string expression + identifiers = get_identifiers(index) + variables = dict(self.variables) + if 'k' in identifiers: + synapse_numbers = _synapse_numbers(self.synaptic_pre[:], + self.synaptic_post[:]) + variables['k'] = ArrayVariable('k', Unit(1), + synapse_numbers) + namespace = get_local_namespace(1) + additional_namespace = ('implicit-namespace', namespace) + abstract_code = '_cond = ' + index + template = getattr(self.synapses, '_index_with_code_template', + 'state_variable_indexing') + codeobj = create_runner_codeobj(self.synapses, + abstract_code, + template, + additional_variables=variables, + additional_namespace=additional_namespace, + ) + + result = codeobj() + return result + else: + raise IndexError('Unsupported index type {itype}'.format(itype=type(index))) + + +class Synapses(Group): + ''' + Class representing synaptic connections. Creating a new `Synapses` object + does by default not create any synapses -- you either have to provide + the `connect` argument or call the `Synapses.connect` method for that. + + Parameters + ---------- + + source : `SpikeSource` + The source of spikes, e.g. a `NeuronGroup`. + target : `Group`, optional + The target of the spikes, typically a `NeuronGroup`. If none is given, + the same as `source` + model : {`str`, `Equations`}, optional + The model equations for the synapses. + pre : {str, dict}, optional + The code that will be executed after every pre-synaptic spike. Can be + either a single (possibly multi-line) string, or a dictionary mapping + pathway names to code strings. In the first case, the pathway will be + called ``pre`` and made available as an attribute of the same name. + In the latter case, the given names will be used as the + pathway/attribute names. Each pathway has its own code and its own + delays. + post : {str, dict}, optional + The code that will be executed after every post-synaptic spike. Same + conventions as for `pre`, the default name for the pathway is ``post``. + connect : {str, bool}. optional + Determines whether any actual synapses are created. ``False`` (the + default) means not to create any synapses, ``True`` means to create + synapses between all source/target pairs. Also accepts a string + expression that evaluates to ``True`` for every synapse that should + be created, e.g. ``'i == j'`` for a one-to-one connectivity. See + `Synapses.connect` for more details. + delay : {`Quantity`, dict}, optional + The delay for the "pre" pathway (same for all synapses) or a dictionary + mapping pathway names to delays. If a delay is specified in this way + for a pathway, it is stored as a single scalar value. It can still + be changed afterwards, but only to a single scalar value. 
If you want + to have delays that vary across synapses, do not use the keyword + argument, but instead set the delays via the attribute of the pathway, + e.g. ``S.pre.delay = ...`` (or ``S.delay = ...`` as an abbreviation), + ``S.post.delay = ...``, etc. + namespace : dict, optional + A dictionary mapping identifier names to objects. If not given, the + namespace will be filled in at the time of the call of `Network.run`, + with either the values from the ``network`` argument of the + `Network.run` method or from the local context, if no such argument is + given. + dtype : `dtype`, optional + The standard datatype for all state variables. Defaults to + `core.default_scalar_type`. + codeobj_class : class, optional + The `CodeObject` class to use to run code. + clock : `Clock`, optional + The clock to use. + method : {str, `StateUpdateMethod`}, optional + The numerical integration method to use. If none is given, an + appropriate one is automatically determined. + name : str, optional + The name for this object. If none is given, a unique name of the form + ``synapses``, ``synapses_1``, etc. will be automatically chosen. + ''' + def __init__(self, source, target=None, model=None, pre=None, post=None, + connect=False, delay=None, namespace=None, dtype=None, + codeobj_class=None, + clock=None, method=None, name='synapses*'): + + Group.__init__(self, when=clock, name=name) + + self.codeobj_class = codeobj_class + + self.source = weakref.proxy(source) + if target is None: + self.target = self.source + else: + self.target = weakref.proxy(target) + + ##### Prepare and validate equations + if model is None: + model = '' + + if isinstance(model, basestring): + model = Equations(model) + if not isinstance(model, Equations): + raise TypeError(('model has to be a string or an Equations ' + 'object, is "%s" instead.') % type(model)) + + # Check flags + model.check_flags({DIFFERENTIAL_EQUATION: ['event-driven', 'lumped'], + STATIC_EQUATION: ['lumped'], + PARAMETER: ['constant', 'lumped']}) + + # Separate the equations into event-driven and continuously updated + # equations + event_driven = [] + continuous = [] + for single_equation in model.itervalues(): + if 'event-driven' in single_equation.flags: + if 'lumped' in single_equation.flags: + raise ValueError(('Event-driven variable %s cannot be ' + 'a lumped variable.') % single_equation.varname) + event_driven.append(single_equation) + else: + continuous.append(single_equation) + # Add the lastupdate variable, used by event-driven equations + continuous.append(SingleEquation(PARAMETER, 'lastupdate', second)) + + if len(event_driven): + self.event_driven = Equations(event_driven) + else: + self.event_driven = None + + self.equations = Equations(continuous) + + # Setup the namespace + self._given_namespace = namespace + self.namespace = create_namespace(namespace) + + self._queues = {} + self._delays = {} + + # Make use of a special template when setting/indexing variables with + # code in order to allow references to pre- and postsynaptic variables + self._set_with_code_template = 'synaptic_variable_set' + self._index_with_code_template = 'synaptic_variable_indexing' + + # Setup variables + self.variables = self._create_variables() + + self.item_mapping = SynapticItemMapping(self) + for var in self.variables.itervalues(): + if isinstance(var, DynamicArrayVariable): + # Register the array with the `SynapticItemMapping` object so + # it gets automatically resized + self.item_mapping.register_variable(var) + + self.indices = {'_idx': self.item_mapping, + 
'_presynaptic_idx': self.variables['_synaptic_pre'],
+                        '_postsynaptic_idx': self.variables['_synaptic_post']}
+        # Allow S.i instead of S.indices.i, etc.
+        self.i = self.item_mapping.i
+        self.j = self.item_mapping.j
+        self.k = self.item_mapping.k
+
+        #: List of names of all updaters, e.g. ['pre', 'post']
+        self._synaptic_updaters = []
+        for prepost, argument in zip(('pre', 'post'), (pre, post)):
+            if not argument:
+                continue
+            if isinstance(argument, basestring):
+                self._add_updater(argument, prepost)
+            elif isinstance(argument, collections.Mapping):
+                for key, value in argument.iteritems():
+                    if not isinstance(key, basestring):
+                        err_msg = ('Keys for the "{}" argument '
+                                   'have to be strings, got '
+                                   '{} instead.').format(prepost, type(key))
+                        raise TypeError(err_msg)
+                    self._add_updater(value, prepost, objname=key)
+
+        # If we have a pathway called "pre" (the most common use case), provide
+        # direct access to its delay via a delay attribute (instead of having
+        # to use pre.delay)
+        if 'pre' in self._synaptic_updaters:
+            self.variables['delay'] = self.pre.variables['delay']
+
+        if delay is not None:
+            if isinstance(delay, Quantity):
+                if not 'pre' in self._synaptic_updaters:
+                    raise ValueError(('Cannot set delay, no "pre" pathway exists. '
+                                      'Use a dictionary if you want to set the '
+                                      'delay for a pathway with a different name.'))
+                delay = {'pre': delay}
+
+            if not isinstance(delay, collections.Mapping):
+                raise TypeError('Delay argument has to be a quantity or a '
+                                'dictionary, is type %s instead.' % type(delay))
+            for pathway, pathway_delay in delay.iteritems():
+                if not pathway in self._synaptic_updaters:
+                    raise ValueError(('Cannot set the delay for pathway '
+                                      '"%s": unknown pathway.') % pathway)
+                if not isinstance(pathway_delay, Quantity):
+                    raise TypeError(('Cannot set the delay for pathway "%s": '
+                                     'expected a quantity, got %s instead.') % (pathway,
+                                                                                type(pathway_delay)))
+                if pathway_delay.size != 1:
+                    raise TypeError(('Cannot set the delay for pathway "%s": '
+                                     'expected a scalar quantity, got a '
+                                     'quantity with shape %s instead.') % (pathway,
+                                                                           str(pathway_delay.shape)))
+                fail_for_dimension_mismatch(pathway_delay, second, ('Delay has to be '
+                                                                    'specified in units '
+                                                                    'of seconds'))
+                updater = getattr(self, pathway)
+                self.item_mapping.unregister_variable(updater._delays)
+                del updater._delays
+                # For simplicity, store the delay as a one-element array
+                # so that for example updater._delays[:] works.
+                updater._delays = np.array([float(pathway_delay)])
+                variable = ArrayVariable('delay', second, updater._delays,
+                                         group_name=self.name, scalar=True)
+                updater.variables['delay'] = variable
+                if pathway == 'pre':
+                    self.variables['delay'] = variable
+
+        #: Performs numerical integration step
+        self.state_updater = StateUpdater(self, method)
+        self.contained_objects.append(self.state_updater)
+
+        #: "Lumped variable" mechanism -- sum over all synapses of a
+        #: postsynaptic target
+        self.lumped_updaters = {}
+        for single_equation in self.equations.itervalues():
+            if 'lumped' in single_equation.flags:
+                varname = single_equation.varname
+                # For a lumped variable, we need an equivalent parameter in the
+                # target group
+                if not varname in self.target.variables:
+                    raise ValueError(('The lumped variable %s needs a variable '
+                                      'of the same name in the target '
+                                      'group ') % single_equation.varname)
+                fail_for_dimension_mismatch(self.variables[varname].unit,
+                                            self.target.variables[varname].unit,
+                                            ('Lumped variables need to have '
+                                             'the same units in Synapses '
+                                             'and the target group'))
+                # TODO: Add some more stringent check about the type of
+                # variable in the target group
+                updater = LumpedUpdater(varname, self, self.target)
+                self.lumped_updaters[varname] = updater
+                self.contained_objects.append(updater)
+
+        # Do an initial connect, if requested
+        if not isinstance(connect, (bool, basestring)):
+            raise TypeError(('"connect" keyword has to be a boolean value or a '
+                             'string, is type %s instead.' % type(connect)))
+        self._initial_connect = connect
+        if not connect is False:
+            self.connect(connect, level=1)
+
+        # Activate name attribute access
+        self._enable_group_attributes()
+
+    N = property(fget=lambda self: self.item_mapping.N,
+                 doc='Total number of synapses')
+
+    def __len__(self):
+        return self.N
+
+    def before_run(self, namespace):
+        self.lastupdate = self.clock.t
+        super(Synapses, self).before_run(namespace)
+
+    def _add_updater(self, code, prepost, objname=None):
+        '''
+        Add a new target updater. Users should call `add_pre` or `add_post`
+        instead.
+
+        Parameters
+        ----------
+        code : str
+            The abstract code that should be executed on pre-/postsynaptic
+            spikes.
+        prepost : {'pre', 'post'}
+            Whether the code is triggered by presynaptic or postsynaptic spikes.
+        objname : str, optional
+            A name for the object, see `SynapticPathway` for more details.
+
+        Returns
+        -------
+        objname : str
+            The final name for the object. Equals `objname` if it was explicitly
+            given (and did not end in a wildcard character).
+
+        '''
+        if prepost == 'pre':
+            spike_group, group_name = self.source, 'Source'
+        elif prepost == 'post':
+            spike_group, group_name = self.target, 'Target'
+        else:
+            raise ValueError(('"prepost" argument has to be "pre" or "post", '
+                              'is "%s".') % prepost)
+
+        if not (hasattr(spike_group, 'spikes') and hasattr(spike_group, 'clock')):
+            raise TypeError(('%s has to be a SpikeSource with spikes and'
+                             ' clock attribute.
Is type %r instead') + % (group_name, type(spike_group))) + + updater = SynapticPathway(self, code, prepost, objname) + objname = updater.objname + if hasattr(self, objname): + raise ValueError(('Cannot add updater with name "{name}", synapses ' + 'object already has an attribute with this ' + 'name.').format(name=objname)) + + setattr(self, objname, updater) + self._synaptic_updaters.append(objname) + self.contained_objects.append(updater) + return objname + + def _create_variables(self, dtype=None): + ''' + Create the variables dictionary for this `Synapses`, containing + entries for the equation variables and some standard entries. + ''' + if dtype is None: + dtype = defaultdict(lambda: brian_prefs['core.default_scalar_dtype']) + elif isinstance(dtype, np.dtype): + dtype = defaultdict(lambda: dtype) + elif not hasattr(dtype, '__getitem__'): + raise TypeError(('Cannot use type %s as dtype ' + 'specification') % type(dtype)) + + # Add all the pre and post variables with _pre and _post suffixes + v = {} + self.variable_indices = defaultdict(lambda: '_idx') + for name, var in getattr(self.source, 'variables', {}).iteritems(): + if isinstance(var, (ArrayVariable, Subexpression)): + v[name + '_pre'] = var + self.variable_indices[name + '_pre'] = '_presynaptic_idx' + for name, var in getattr(self.target, 'variables', {}).iteritems(): + if isinstance(var, (ArrayVariable, Subexpression)): + v[name + '_post'] = var + self.variable_indices[name + '_post'] = '_postsynaptic_idx' + # Also add all the post variables without a suffix -- if this + # clashes with the name of a state variable defined in this + # Synapses group, the latter will overwrite the entry later and + # take precedence + v[name] = var + self.variable_indices[name] = '_postsynaptic_idx' + + self.pre_synaptic = [DynamicArray1D(0, dtype=np.int32) + for _ in xrange(len(self.source))] + self.post_synaptic = [DynamicArray1D(0, dtype=np.int32) + for _ in xrange(len(self.target))] + + dev = get_device() + # Standard variables always present + v.update({'t': AttributeVariable(second, self.clock, 't_', + constant=False), + 'dt': AttributeVariable(second, self.clock, 'dt_', + constant=True), + '_num_source_neurons': Variable(Unit(1), len(self.source), + constant=True), + '_num_target_neurons': Variable(Unit(1), len(self.target), + constant=True), + '_source_offset': Variable(Unit(1), self.source.offset, + constant=True), + '_target_offset': Variable(Unit(1), self.target.offset, + constant=True), + '_synaptic_pre': dev.dynamic_array_1d(self, '_synaptic_pre', + 0, Unit(1), dtype=np.int32), + '_synaptic_post': dev.dynamic_array_1d(self, '_synaptic_post', + 0, Unit(1), dtype=np.int32), + # We don't need "proper" specifier for these -- they go + # back to Python code currently + '_pre_synaptic': Variable(Unit(1), self.pre_synaptic), + '_post_synaptic': Variable(Unit(1), self.post_synaptic)}) + + for eq in itertools.chain(self.equations.itervalues(), + self.event_driven.itervalues() + if self.event_driven is not None else []): + if eq.type in (DIFFERENTIAL_EQUATION, PARAMETER): + constant = ('constant' in eq.flags) + # We are dealing with dynamic arrays here, code generation + # shouldn't directly access the specifier.array attribute but + # use specifier.get_value() to get a reference to the underlying + # array + v[eq.varname] = dev.dynamic_array_1d(self, + eq.varname, + 0, + eq.unit, + dtype=dtype[eq.varname], + constant=constant, + is_bool=eq.is_bool) + if eq.varname in self.variable_indices: + # we are overwriting a postsynaptic variable of 
the same
+                # name, delete the reference to the postsynaptic index
+                del self.variable_indices[eq.varname]
+            elif eq.type == STATIC_EQUATION:
+                v.update({eq.varname: Subexpression(eq.unit,
+                                                    brian_prefs['core.default_scalar_dtype'],
+                                                    str(eq.expr),
+                                                    is_bool=eq.is_bool)})
+            else:
+                raise AssertionError('Unknown type of equation: ' + eq.type)
+
+        # Stochastic variables
+        for xi in self.equations.stochastic_variables:
+            v.update({xi: StochasticVariable()})
+
+        return v
+
+    def connect(self, pre_or_cond, post=None, p=1., n=1, level=0):
+        '''
+        Add synapses. The first argument can be either a presynaptic index
+        (int or array) or a condition for synapse creation in the form of a
+        string that evaluates to a boolean value (or directly a boolean value).
+        If it is given as an index, also `post` has to be present. A string
+        condition will be evaluated for all pre-/postsynaptic indices, which
+        can be referred to as `i` and `j`.
+
+        Parameters
+        ----------
+        pre_or_cond : {int, ndarray of int, bool, str}
+            The presynaptic neurons (in the form of an index or an array of
+            indices) or a boolean value or a string that evaluates to a
+            boolean value. If it is an index, then also `post` has to be
+            given.
+        post : {int, ndarray of int}, optional
+            Indices of neurons from the target group. Non-optional if one or
+            more presynaptic indices have been given.
+        p : float, optional
+            The probability to create `n` synapses wherever the condition
+            given as `pre_or_cond` evaluates to true or for the given
+            pre/post indices.
+        n : int, optional
+            The number of synapses to create per pre/post connection pair.
+            Defaults to 1.
+
+        Examples
+        --------
+        >>> from brian2 import *
+        >>> import numpy as np
+        >>> G = NeuronGroup(10, 'dv/dt = -v / tau : 1', threshold='v>1', reset='v=0')
+        >>> S = Synapses(G, G, 'w:1', pre='v+=w')
+        >>> S.connect('i != j') # all-to-all but no self-connections
+        >>> S.connect(0, 0) # connect neuron 0 to itself
+        >>> S.connect(np.array([1, 2]), np.array([2, 1])) # connect 1->2 and 2->1
+        >>> S.connect(True) # connect all-to-all
+        >>> S.connect('i != j', p=0.1) # Connect neurons with 10% probability, exclude self-connections
+        >>> S.connect('i == j', n=2) # Connect all neurons to themselves with 2 synapses
+        '''
+        if not isinstance(pre_or_cond, (bool, basestring)):
+            pre_or_cond = np.asarray(pre_or_cond)
+            if not np.issubdtype(pre_or_cond.dtype, np.int):
+                raise TypeError(('Presynaptic indices have to be given as '
+                                 'integers, are type %s instead.') % pre_or_cond.dtype)
+
+            post = np.asarray(post)
+            if not np.issubdtype(post.dtype, np.int):
+                raise TypeError(('Presynaptic indices can only be combined '
+                                 'with postsynaptic integer indices'))
+            if isinstance(n, basestring):
+                raise TypeError(('Indices cannot be combined with a string '
+                                 'expression for n. Either use an array/scalar '
+                                 'for n, or a string expression for the '
+                                 'connections'))
+            i, j, n = np.broadcast_arrays(pre_or_cond, post, n)
+            if i.ndim > 1:
+                raise ValueError('Can only use 1-dimensional indices')
+            self.item_mapping._add_synapses(i, j, n, p, level=level+1)
+        elif isinstance(pre_or_cond, (basestring, bool)):
+            if pre_or_cond is False:
+                return # nothing to do...
+            elif pre_or_cond is True:
+                # TODO: This should not be handled with the general mechanism
+                pre_or_cond = 'True'
+            if post is not None:
+                raise ValueError('Cannot give a postsynaptic index when '
+                                 'using a string expression')
+            if not isinstance(n, (int, basestring)):
+                raise TypeError('n has to be an integer or a string evaluating '
+                                'to an integer, is type %s instead.' % type(n))
+            if not isinstance(p, (float, basestring)):
+                raise TypeError('p has to be a float or a string evaluating '
+                                'to a float, is type %s instead.' % type(p))
+            self.item_mapping._add_synapses(None, None, n, p, condition=pre_or_cond,
+                                            level=level+1)
+        else:
+            raise TypeError(('First argument has to be an index or a '
+                             'string, is %s instead.') % type(pre_or_cond))
+
+
+def smallest_inttype(N):
+    '''
+    Returns the smallest signed integer dtype that can store N indices.
+    '''
+    if N<=127:
+        return np.int8
+    elif N<=32767:
+        return np.int16
+    elif N<=2147483647:
+        return np.int32
+    else:
+        return np.int64
diff --git a/brian2/tests/test_clocks.py b/brian2/tests/test_clocks.py
index eaa3b6648..d0432566f 100644
--- a/brian2/tests/test_clocks.py
+++ b/brian2/tests/test_clocks.py
@@ -17,7 +17,6 @@ def test_clocks():
     assert_equal(clock.i_end, 100)
     clock.i_end = 200
     assert_equal(clock.t_end, 200*ms)
-    assert_raises(RuntimeError, lambda: setattr(clock, 'dt', 2*ms))
     clock.t_ = float(8*ms)
    assert_equal(clock.i, 8)
     clock.t = 0*ms
@@ -31,15 +30,14 @@
     assert_equal(clock.t, 0*ms)
 
     clock = Clock()
+    assert_equal(clock.dt, 0.1*ms)
     clock.dt = 1*ms
     assert_equal(clock.dt, 1*ms)
-    assert_raises(RuntimeError, lambda: setattr(clock, 'dt', 2*ms))
 
 @with_setup(teardown=restore_initial_state)
 def test_defaultclock():
     defaultclock.dt = 1*ms
     assert_equal(defaultclock.dt, 1*ms)
-    assert_raises(RuntimeError, lambda: setattr(defaultclock, 'dt', 2*ms))
 
 if __name__=='__main__':
     test_clocks()
diff --git a/brian2/tests/test_codegen.py b/brian2/tests/test_codegen.py
index b76c596cf..3e2c35bda 100644
--- a/brian2/tests/test_codegen.py
+++ b/brian2/tests/test_codegen.py
@@ -26,12 +26,9 @@ def test_get_identifiers_recursively():
     '''
     Test finding identifiers including subexpressions.
     '''
-    variables = {}
-    variables['sub1'] = Subexpression(Unit(1), np.float32, 'sub2 * z',
-                                      variables, {})
-    variables['sub2'] = Subexpression(Unit(1), np.float32, '5 + y',
-                                      variables, {})
-    variables['x'] = Variable(unit=None)
+    variables = {'sub1': Subexpression(Unit(1), np.float32, 'sub2 * z'),
+                 'sub2': Subexpression(Unit(1), np.float32, '5 + y'),
+                 'x': Variable(unit=None)}
     identifiers = get_identifiers_recursively('_x = sub1 + x', variables)
     assert identifiers == set(['x', '_x', 'y', 'z', 'sub1', 'sub2'])
 
diff --git a/brian2/tests/test_refractory.py b/brian2/tests/test_refractory.py
index 1d30bb5c9..51b11c01a 100644
--- a/brian2/tests/test_refractory.py
+++ b/brian2/tests/test_refractory.py
@@ -51,6 +51,7 @@ def test_refractoriness_variables():
     mon = StateMonitor(G, ['v', 'w'], record=True)
     net = Network(G, mon)
     net.run(20*ms)
+    print 'mon.t', mon.t
     # No difference before the spike
     assert_equal(mon[0].v[mon.t < 10*ms], mon[0].w[mon.t < 10*ms])
     # v is not updated during refractoriness
diff --git a/brian2/tests/test_synapses.py b/brian2/tests/test_synapses.py
index b22c570b8..8046670a2 100644
--- a/brian2/tests/test_synapses.py
+++ b/brian2/tests/test_synapses.py
@@ -359,7 +359,6 @@ def test_delay_specification():
     assert_raises(ValueError, lambda: Synapses(G, G, 'w:1', pre='v+=w',
                                                delay={'post': 5*ms}))
 
-
 def test_transmission():
     delays = [[0, 0] * ms, [1, 1] * ms, [1, 2] * ms]
     for codeobj_class, delay in zip(codeobj_classes, delays):
@@ -388,6 +387,30 @@ def test_transmission():
                         target_mon.t[target_mon.i==1] - defaultclock.dt - delay[1])
+
+def test_changed_dt_spikes_in_queue():
+    defaultclock.dt = .5*ms
+    G1 = NeuronGroup(1, 'v:1', threshold='v>1', reset='v=0')
+    G1.v = 1.1
+    G2 = NeuronGroup(10, 'v:1', threshold='v>1', reset='v=0')
+    S = Synapses(G1, G2, pre='v+=1.1')
+    S.connect(True)
+    S.delay = 'j*ms'
+    mon = SpikeMonitor(G2)
+    net = Network(G1, G2, S, mon)
+    net.run(5*ms)
+    defaultclock.dt = 1*ms
+    net.run(3*ms)
+    defaultclock.dt = 0.1*ms
+    net.run(2*ms)
+    # Spikes should have delays of 0, 1, 2, ... ms and always
+    # trigger a spike one dt later
+    expected = [0.5, 1.5, 2.5, 3.5, 4.5,  # dt=0.5ms
+                6, 7, 8,                  # dt=1ms
+                8.1, 9.1                  # dt=0.1ms
+                ] * ms
+    assert_equal(mon.t, expected)
+
+
 def test_lumped_variable():
     source = NeuronGroup(2, 'v : 1', threshold='v>1', reset='v=0')
     source.v = 1.1 # will spike immediately
@@ -484,6 +507,7 @@ def test_repr():
     test_state_variable_indexing()
     test_delay_specification()
     test_transmission()
+    test_changed_dt_spikes_in_queue()
     test_lumped_variable()
     test_event_driven()
     test_repr()
diff --git a/dev/ideas/devices/cpp_standalone.py b/dev/ideas/devices/cpp_standalone.py
index 3ee03655a..7f1f51ab3 100644
--- a/dev/ideas/devices/cpp_standalone.py
+++ b/dev/ideas/devices/cpp_standalone.py
@@ -34,7 +34,6 @@
                   M,
                   #G2,
                   )
-    net.run(0*second)
 
     build(net)
 
diff --git a/docs_sphinx/developer/new_magic_and_clocks.rst b/docs_sphinx/developer/new_magic_and_clocks.rst
index 3c99d3894..439e137a2 100644
--- a/docs_sphinx/developer/new_magic_and_clocks.rst
+++ b/docs_sphinx/developer/new_magic_and_clocks.rst
@@ -4,7 +4,7 @@ New magic and clock behaviour
 Clocks
 ------
 
-The rule for clocks was that
+The rule for clocks in Brian 1 was that
 you would either specify a clock explicitly, or it would guess it based on
 the following rule: if there is no clock defined in the execution frame of
 the object being defined, use the default clock; if there is a single clock
@@ -21,16 +21,11 @@ introduce.
 
 Incidentally, you could also change the dt of a clock after it had been
 defined, which would invalidate any state updaters that
-were based on a fixed dt. Consequently, in the new design you cannot change the
-dt of a clock once it has been specified. This has one huge problem, it would
-mean that you couldn't change the dt of the defaultclock, which would be too
-annoying in practice. So, you can now leave the dt of a clock unspecified when
-you create the clock, and it can be left unspecified until the .dt attribute
-is accessed. If no value is specified, it uses ``dt=0.1*ms``, and the .dt
-attribute can be set precisely once (if it was left unspecified when created).
-This means you can now set ``defaultclock.dt`` precisely once. Again, this is
-slightly less flexible than the old system, but avoids many potentially
-confusing bugs.
+were based on a fixed dt. This is no longer a problem in Brian 2, since state
+updaters are rebuilt at every run and therefore work fine with a changed dt.
+Note that it is the dt of the respective clock (i.e., in many cases,
+``defaultclock.dt``) at the time of the `run` call that is relevant for the
+simulation, not the dt at the time the `NeuronGroup` (for example) was created.
 
 Magic
 -----
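
The clock behaviour documented in the hunk above is the one exercised by the new
test_changed_dt_spikes_in_queue test: the dt in effect at each run call is the
one used for that run, and it may be changed between runs. The following is a
minimal sketch of the intended usage, with an illustrative toy model that is
not part of this patch and assumes the Brian 2 API at this stage of development:

    from brian2 import *

    # Simple leaky integrator, just to have something to simulate
    G = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1')
    G.v = 1
    net = Network(G)

    defaultclock.dt = 0.5*ms   # the dt at the time of this run() call is used
    net.run(5*ms)

    defaultclock.dt = 0.1*ms   # state updaters are rebuilt, so the new dt takes effect
    net.run(5*ms)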