diff --git a/brian2/codegen/codeobject.py b/brian2/codegen/codeobject.py
index 5d7b59814..78fcc69c6 100644
--- a/brian2/codegen/codeobject.py
+++ b/brian2/codegen/codeobject.py
@@ -1,9 +1,10 @@
 import functools
 
-from brian2.core.specifiers import (ArrayVariable, Value,
-                                    AttributeValue, Subexpression)
+from brian2.core.variables import (ArrayVariable, Variable,
+                                   AttributeVariable, Subexpression,
+                                   StochasticVariable)
 from .functions.base import Function
-from brian2.core.preferences import brian_prefs, BrianPreference
+from brian2.core.preferences import brian_prefs
 from brian2.utils.logger import get_logger
 from .translation import translate
 from .runtime.targets import runtime_targets
@@ -30,11 +31,11 @@ def get_default_codeobject_class():
     return codeobj_class
 
 
-def prepare_namespace(namespace, specifiers):
+def prepare_namespace(namespace, variables):
     namespace = dict(namespace)
     # Add variables referring to the arrays
     arrays = []
-    for value in specifiers.itervalues():
+    for value in variables.itervalues():
         if isinstance(value, ArrayVariable):
             arrays.append((value.arrayname, value.get_value()))
     namespace.update(arrays)
@@ -42,8 +43,9 @@ def prepare_namespace(namespace, specifiers):
     return namespace
 
 
-def create_codeobject(name, abstract_code, namespace, specifiers, template_name,
-                      codeobj_class=None, indices=None, template_kwds=None):
+def create_codeobject(name, abstract_code, namespace, variables, template_name,
+                      indices, variable_indices, codeobj_class=None,
+                      template_kwds=None):
     '''
     The following keyword arguments are passed to the template:
 
@@ -54,8 +56,6 @@ def create_codeobject(name, abstract_code, namespace, specifiers, template_name,
     ``template_kwds`` (but you should ensure there are no name clashes).
     '''
 
-    if indices is None:  # TODO: Do we ever create code without any index?
-        indices = {}
 
     if template_kwds is None:
         template_kwds = dict()
@@ -68,19 +68,22 @@ def create_codeobject(name, abstract_code, namespace, specifiers, template_name,
     template = get_codeobject_template(template_name,
                                        codeobj_class=codeobj_class)
 
-    namespace = prepare_namespace(namespace, specifiers)
+    namespace = prepare_namespace(namespace, variables)
 
     logger.debug(name + " abstract code:\n" + abstract_code)
-    innercode, kwds = translate(abstract_code, specifiers, namespace,
-                                brian_prefs['core.default_scalar_dtype'],
-                                codeobj_class.language, indices)
+    iterate_all = template.iterate_all
+    innercode, kwds = translate(abstract_code, variables, namespace,
+                                dtype=brian_prefs['core.default_scalar_dtype'],
+                                language=codeobj_class.language,
+                                variable_indices=variable_indices,
+                                iterate_all=iterate_all)
     template_kwds.update(kwds)
     logger.debug(name + " inner code:\n" + str(innercode))
     code = template(innercode, **template_kwds)
     logger.debug(name + " code:\n" + str(code))
-    specifiers.update(indices)
-    codeobj = codeobj_class(code, namespace, specifiers)
+    variables.update(indices)
+    codeobj = codeobj_class(code, namespace, variables)
     codeobj.compile()
     return codeobj
@@ -111,40 +114,43 @@ class CodeObject(object):
     #: The `Language` used by this `CodeObject`
     language = None
 
-    def __init__(self, code, namespace, specifiers):
+    def __init__(self, code, namespace, variables):
        self.code = code
-        self.compile_methods = self.get_compile_methods(specifiers)
+        self.compile_methods = self.get_compile_methods(variables)
         self.namespace = namespace
-        self.specifiers = specifiers
+        self.variables = variables
 
-        # Specifiers can refer to values that are either constant (e.g. dt)
+        # Variables can refer to values that are either constant (e.g. dt)
         # or change every timestep (e.g. t). We add the values of the
-        # constant specifiers here and add the names of non-constant specifiers
+        # constant variables here and add the names of non-constant variables
         # to a list
 
         # A list containing tuples of name and a function giving the value
         self.nonconstant_values = []
-        for name, spec in self.specifiers.iteritems():
-            if isinstance(spec, Value):
-                if isinstance(spec, AttributeValue):
-                    self.nonconstant_values.append((name, spec.get_value))
-                    if not spec.scalar:
+        for name, var in self.variables.iteritems():
+            if isinstance(var, Variable) and not isinstance(var, Subexpression):
+                if not var.constant:
+                    self.nonconstant_values.append((name, var.get_value))
+                    if not var.scalar:
                         self.nonconstant_values.append(('_num' + name,
-                                                        spec.get_len))
-                elif not isinstance(spec, Subexpression):
-                    value = spec.get_value()
+                                                        var.get_len))
+                else:
+                    try:
+                        value = var.get_value()
+                    except TypeError:  # A dummy Variable without value
+                        continue
                     self.namespace[name] = value
                     # if it is a type that has a length, add a variable called
                     # '_num'+name with its length
-                    if not spec.scalar:
-                        self.namespace['_num' + name] = spec.get_len()
+                    if not var.scalar:
+                        self.namespace['_num' + name] = var.get_len()
 
-    def get_compile_methods(self, specifiers):
+    def get_compile_methods(self, variables):
         meths = []
-        for var, spec in specifiers.items():
-            if isinstance(spec, Function):
-                meths.append(functools.partial(spec.on_compile,
+        for var, variable in variables.items():
+            if isinstance(variable, Function):
+                meths.append(functools.partial(variable.on_compile,
                                                language=self.language,
                                                var=var))
         return meths
diff --git a/brian2/codegen/languages/base.py b/brian2/codegen/languages/base.py
index e8566d025..b773857cc 100644
--- a/brian2/codegen/languages/base.py
+++ b/brian2/codegen/languages/base.py
@@ -2,7 +2,7 @@
 Base class for languages, gives the methods which should be overridden to
 implement a new language.
 '''
-from brian2.core.specifiers import (ArrayVariable, Value, AttributeValue,
+from brian2.core.variables import (ArrayVariable, AttributeVariable,
                                    Subexpression)
 from brian2.utils.stringtools import get_identifiers
 
@@ -34,7 +34,8 @@ def translate_statement(self, statement):
         '''
         raise NotImplementedError
 
-    def translate_statement_sequence(self, statements, specifiers, namespace, indices):
+    def translate_statement_sequence(self, statements, variables, namespace,
+                                     variable_indices, iterate_all):
         '''
         Translate a sequence of `Statement` into the target language, taking
         care to declare variables, etc. if necessary.
@@ -45,7 +46,7 @@ def translate_statement_sequence(self, statements, specifiers, namespace, indice
         '''
         raise NotImplementedError
 
-    def array_read_write(self, statements, specifiers):
+    def array_read_write(self, statements, variables):
         '''
         Helper function, gives the set of ArrayVariables that are read from
         and written to in the series of statements.
Returns the pair read, write @@ -60,6 +61,8 @@ def array_read_write(self, statements, specifiers): ids.add(stmt.var) read = read.union(ids) write.add(stmt.var) - read = set(var for var, spec in specifiers.items() if isinstance(spec, ArrayVariable) and var in read) - write = set(var for var, spec in specifiers.items() if isinstance(spec, ArrayVariable) and var in write) + read = set(varname for varname, var in variables.items() + if isinstance(var, ArrayVariable) and varname in read) + write = set(varname for varname, var in variables.items() + if isinstance(var, ArrayVariable) and varname in write) return read, write diff --git a/brian2/codegen/languages/cpp_lang.py b/brian2/codegen/languages/cpp_lang.py index 4a1ebc6ed..1340e02bf 100644 --- a/brian2/codegen/languages/cpp_lang.py +++ b/brian2/codegen/languages/cpp_lang.py @@ -10,7 +10,7 @@ from brian2.utils.logger import get_logger from brian2.parsing.rendering import CPPNodeRenderer from brian2.core.preferences import brian_prefs, BrianPreference -from brian2.core.specifiers import ArrayVariable +from brian2.core.variables import ArrayVariable from .base import Language @@ -125,35 +125,38 @@ def translate_statement(self, statement): decl = '' return decl + var + ' ' + op + ' ' + self.translate_expression(expr) + ';' - def translate_statement_sequence(self, statements, specifiers, namespace, indices): - read, write = self.array_read_write(statements, specifiers) + def translate_statement_sequence(self, statements, variables, namespace, + variable_indices, iterate_all): + + # Note that C++ code does not care about the iterate_all argument -- it + # always has to loop over the elements + + read, write = self.array_read_write(statements, variables) lines = [] # read arrays - for var in read: - index_var = specifiers[var].index - index_spec = indices[index_var] - spec = specifiers[var] - if var not in write: + for varname in read: + index_var = variable_indices[varname] + var = variables[varname] + if varname not in write: line = 'const ' else: line = '' - line = line + c_data_type(spec.dtype) + ' ' + var + ' = ' - line = line + '_ptr' + spec.arrayname + '[' + index_var + '];' + line = line + c_data_type(var.dtype) + ' ' + varname + ' = ' + line = line + '_ptr' + var.arrayname + '[' + index_var + '];' lines.append(line) # simply declare variables that will be written but not read - for var in write: - if var not in read: - spec = specifiers[var] - line = c_data_type(spec.dtype) + ' ' + var + ';' + for varname in write: + if varname not in read: + var = variables[varname] + line = c_data_type(var.dtype) + ' ' + varname + ';' lines.append(line) # the actual code lines.extend([self.translate_statement(stmt) for stmt in statements]) # write arrays - for var in write: - index_var = specifiers[var].index - index_spec = indices[index_var] - spec = specifiers[var] - line = '_ptr' + spec.arrayname + '[' + index_var + '] = ' + var + ';' + for varname in write: + index_var = variable_indices[varname] + var = variables[varname] + line = '_ptr' + var.arrayname + '[' + index_var + '] = ' + varname + ';' lines.append(line) code = '\n'.join(lines) # set up the restricted pointers, these are used so that the compiler @@ -163,14 +166,11 @@ def translate_statement_sequence(self, statements, specifiers, namespace, indice # same array. E.g. 
in gapjunction code, v_pre and v_post refer to the # same array if a group is connected to itself arraynames = set() - for var, spec in specifiers.iteritems(): - if isinstance(spec, ArrayVariable): - arrayname = spec.arrayname + for varname, var in variables.iteritems(): + if isinstance(var, ArrayVariable): + arrayname = var.arrayname if not arrayname in arraynames: - if spec.dtype != spec.array.dtype: - print spec.array - raise AssertionError('Conflicting dtype information for %s: %s - %s' % (var, spec.dtype, spec.array.dtype)) - line = c_data_type(spec.dtype) + ' * ' + self.restrict + '_ptr' + arrayname + ' = ' + arrayname + ';' + line = c_data_type(var.dtype) + ' * ' + self.restrict + '_ptr' + arrayname + ' = ' + arrayname + ';' lines.append(line) arraynames.add(arrayname) pointers = '\n'.join(lines) @@ -179,23 +179,22 @@ def translate_statement_sequence(self, statements, specifiers, namespace, indice user_functions = [] support_code = '' hash_defines = '' - for var, spec in itertools.chain(namespace.items(), - specifiers.items()): - if isinstance(spec, Function): - user_functions.append(var) - speccode = spec.code(self, var) + for varname, variable in namespace.items(): + if isinstance(variable, Function): + user_functions.append(varname) + speccode = variable.code(self, varname) support_code += '\n' + deindent(speccode['support_code']) hash_defines += deindent(speccode['hashdefine_code']) # add the Python function with a leading '_python', if it # exists. This allows the function to make use of the Python # function via weave if necessary (e.g. in the case of randn) - if not spec.pyfunc is None: - pyfunc_name = '_python_' + var - if pyfunc_name in namespace: + if not variable.pyfunc is None: + pyfunc_name = '_python_' + varname + if pyfunc_name in namespace: logger.warn(('Namespace already contains function %s, ' 'not replacing it') % pyfunc_name) else: - namespace[pyfunc_name] = spec.pyfunc + namespace[pyfunc_name] = variable.pyfunc # delete the user-defined functions from the namespace for func in user_functions: diff --git a/brian2/codegen/languages/numpy_lang.py b/brian2/codegen/languages/numpy_lang.py index ad631d8fa..5ac01ea5e 100644 --- a/brian2/codegen/languages/numpy_lang.py +++ b/brian2/codegen/languages/numpy_lang.py @@ -26,26 +26,26 @@ def translate_statement(self, statement): op = '=' return var + ' ' + op + ' ' + self.translate_expression(expr) - def translate_statement_sequence(self, statements, specifiers, namespace, indices): - read, write = self.array_read_write(statements, specifiers) + def translate_statement_sequence(self, statements, variables, namespace, + variable_indices, iterate_all): + read, write = self.array_read_write(statements, variables) lines = [] # read arrays for var in read: - spec = specifiers[var] - index_spec = indices[spec.index] + spec = variables[var] + index = variable_indices[var] line = var + ' = ' + spec.arrayname - if not index_spec.iterate_all: - line = line + '[' + spec.index + ']' + if not index in iterate_all: + line = line + '[' + index + ']' lines.append(line) # the actual code lines.extend([self.translate_statement(stmt) for stmt in statements]) # write arrays for var in write: - index_var = specifiers[var].index - index_spec = indices[index_var] + index_var = variable_indices[var] # check if all operations were inplace and we're operating on the # whole vector, if so we don't need to write the array back - if not index_spec.iterate_all: + if not index_var in iterate_all: all_inplace = False else: all_inplace = True @@ -54,8 +54,8 
@@ def translate_statement_sequence(self, statements, specifiers, namespace, indice all_inplace = False break if not all_inplace: - line = specifiers[var].arrayname - if index_spec.iterate_all: + line = variables[var].arrayname + if index_var in iterate_all: line = line + '[:]' else: line = line + '[' + index_var + ']' diff --git a/brian2/codegen/runtime/numpy_rt/numpy_rt.py b/brian2/codegen/runtime/numpy_rt/numpy_rt.py index e89a3352c..998713652 100644 --- a/brian2/codegen/runtime/numpy_rt/numpy_rt.py +++ b/brian2/codegen/runtime/numpy_rt/numpy_rt.py @@ -8,6 +8,7 @@ __all__ = ['NumpyCodeObject'] + class NumpyCodeObject(CodeObject): ''' Execute code using Numpy @@ -18,10 +19,10 @@ class NumpyCodeObject(CodeObject): 'templates')) language = NumpyLanguage() - def __init__(self, code, namespace, specifiers): + def __init__(self, code, namespace, variables): # TODO: This should maybe go somewhere else namespace['logical_not'] = np.logical_not - CodeObject.__init__(self, code, namespace, specifiers) + CodeObject.__init__(self, code, namespace, variables) def compile(self): super(NumpyCodeObject, self).compile() diff --git a/brian2/codegen/runtime/numpy_rt/templates/lumped_variable.py_ b/brian2/codegen/runtime/numpy_rt/templates/lumped_variable.py_ index 8b337b68c..b7c3196a1 100644 --- a/brian2/codegen/runtime/numpy_rt/templates/lumped_variable.py_ +++ b/brian2/codegen/runtime/numpy_rt/templates/lumped_variable.py_ @@ -1,4 +1,5 @@ -# USE_SPECIFIERS { _post_synaptic, _synaptic_pre, _synaptic_post } +# USES_VARIABLES { _post_synaptic, _synaptic_pre, _synaptic_post } +# ITERATE_ALL { _idx } import numpy as np diff --git a/brian2/codegen/runtime/numpy_rt/templates/ratemonitor.py_ b/brian2/codegen/runtime/numpy_rt/templates/ratemonitor.py_ index cc966b95e..ddd5606d4 100644 --- a/brian2/codegen/runtime/numpy_rt/templates/ratemonitor.py_ +++ b/brian2/codegen/runtime/numpy_rt/templates/ratemonitor.py_ @@ -1,4 +1,4 @@ -# { USE_SPECIFIERS _rate, _t, _spikes, _num_source_neurons, t, dt } +# { USES_VARIABLES _rate, _t, _spikes, _num_source_neurons, t, dt } _new_len = len(_t) + 1 _t.resize(_new_len) diff --git a/brian2/codegen/runtime/numpy_rt/templates/reset.py_ b/brian2/codegen/runtime/numpy_rt/templates/reset.py_ index 1543ac284..6436dafc7 100644 --- a/brian2/codegen/runtime/numpy_rt/templates/reset.py_ +++ b/brian2/codegen/runtime/numpy_rt/templates/reset.py_ @@ -1,6 +1,6 @@ -# USE_SPECIFIERS { _spikes } -_neuron_idx = _spikes -_vectorisation_idx = _neuron_idx +# USES_VARIABLES { _spikes } +_idx = _spikes +_vectorisation_idx = _idx {% for line in code_lines %} {{line}} {% endfor %} diff --git a/brian2/codegen/runtime/numpy_rt/templates/spikemonitor.py_ b/brian2/codegen/runtime/numpy_rt/templates/spikemonitor.py_ index 35d125755..4ba82b17c 100644 --- a/brian2/codegen/runtime/numpy_rt/templates/spikemonitor.py_ +++ b/brian2/codegen/runtime/numpy_rt/templates/spikemonitor.py_ @@ -1,8 +1,11 @@ -# { USE_SPECIFIERS _i, _t, _spikes, _count, _num_source_neurons, t } - +# { USES_VARIABLES _i, _t, _spikes, _count, t, _source_start, _source_end} +import numpy as np +# Take subgroups into account +_spikes = np.asarray(_spikes) +_spikes = _spikes[(_spikes >= _source_start) & (_spikes < _source_end)] +_spikes -= _source_start _n_spikes = len(_spikes) if _n_spikes > 0: - import numpy as np _curlen = len(_t) _newlen = _curlen + _n_spikes @@ -12,4 +15,4 @@ if _n_spikes > 0: _i[_curlen:_newlen] = _spikes # This is slow but correctly handles multiple spikes per neuron - _count += np.bincount(_spikes, 
minlength=_num_source_neurons); \ No newline at end of file + _count += np.bincount(_spikes, minlength=_source_end-_source_start); \ No newline at end of file diff --git a/brian2/codegen/runtime/numpy_rt/templates/state_variable_indexing.py_ b/brian2/codegen/runtime/numpy_rt/templates/state_variable_indexing.py_ index 3c82fd2c4..bbc023b60 100644 --- a/brian2/codegen/runtime/numpy_rt/templates/state_variable_indexing.py_ +++ b/brian2/codegen/runtime/numpy_rt/templates/state_variable_indexing.py_ @@ -1,5 +1,6 @@ -# USE_SPECIFIERS { _indices } -_vectorisation_idx = _indices +# ITERATE_ALL { _idx } + +_vectorisation_idx = _idx {% for line in code_lines %} {{line}} {% endfor %} diff --git a/brian2/codegen/runtime/numpy_rt/templates/statemonitor.py_ b/brian2/codegen/runtime/numpy_rt/templates/statemonitor.py_ index 640f432f1..dff073811 100644 --- a/brian2/codegen/runtime/numpy_rt/templates/statemonitor.py_ +++ b/brian2/codegen/runtime/numpy_rt/templates/statemonitor.py_ @@ -1,4 +1,4 @@ -# USE_SPECIFIERS { _t, _clock_t, _indices } +# USES_VARIABLES { _t, _clock_t, _indices } # Resize dynamic arrays _new_len = len(_t) + 1 @@ -12,7 +12,7 @@ _recorded_{{_varname}}.resize((_new_len, _num_indices)) _t[-1] = _clock_t _vectorisation_idx = _indices -_neuron_idx = _indices[:] +_idx = _indices[:] {% for line in code_lines %} {{line}} {% endfor %} diff --git a/brian2/codegen/runtime/numpy_rt/templates/stateupdate.py_ b/brian2/codegen/runtime/numpy_rt/templates/stateupdate.py_ index e85aac2e2..e902c9a5e 100644 --- a/brian2/codegen/runtime/numpy_rt/templates/stateupdate.py_ +++ b/brian2/codegen/runtime/numpy_rt/templates/stateupdate.py_ @@ -1,5 +1,6 @@ -# USE_SPECIFIERS { _indices } -_vectorisation_idx = _indices +# ITERATE_ALL { _idx } + +_vectorisation_idx = _idx {% for line in code_lines %} {{line}} {% endfor %} \ No newline at end of file diff --git a/brian2/codegen/runtime/numpy_rt/templates/synapses.py_ b/brian2/codegen/runtime/numpy_rt/templates/synapses.py_ index 0febf53f4..6ade64a52 100644 --- a/brian2/codegen/runtime/numpy_rt/templates/synapses.py_ +++ b/brian2/codegen/runtime/numpy_rt/templates/synapses.py_ @@ -1,8 +1,8 @@ -# USE_SPECIFIERS { _synaptic_post, _spiking_synapses } +# USES_VARIABLES { _synaptic_post, _spiking_synapses, _target_offset } import numpy as np -_post_neurons = _synaptic_post.take(_spiking_synapses) +_post_neurons = _synaptic_post.take(_spiking_synapses) + _target_offset _perm = _post_neurons.argsort() _aux = _post_neurons.take(_perm) _flag = np.empty(len(_aux)+1, dtype=bool) @@ -14,9 +14,9 @@ while len(_F): _u = _aux.take(_F) _i = _perm.take(_F) _postsynaptic_idx = _u - _neuron_idx = _spiking_synapses[_i] + _idx = _spiking_synapses[_i] # TODO: how do we get presynaptic indices? do we need to? 
- _vectorisation_idx = _neuron_idx + _vectorisation_idx = _idx {% for line in code_lines %} {{line}} {% endfor %} diff --git a/brian2/codegen/runtime/numpy_rt/templates/synapses_create.py_ b/brian2/codegen/runtime/numpy_rt/templates/synapses_create.py_ index 13149aab2..098a1dad5 100644 --- a/brian2/codegen/runtime/numpy_rt/templates/synapses_create.py_ +++ b/brian2/codegen/runtime/numpy_rt/templates/synapses_create.py_ @@ -1,4 +1,4 @@ -# USE_SPECIFIERS { _num_source_neurons, _num_target_neurons, _synaptic_pre, _synaptic_post, _pre_synaptic, _post_synaptic } +# USES_VARIABLES { _source_neurons, _target_neurons, _synaptic_pre, _synaptic_post, _pre_synaptic, _post_synaptic } import numpy as np @@ -30,16 +30,15 @@ for i in range(_num_source_neurons): _new_num_synapses = _cur_num_synapses + _numnew _synaptic_pre.resize(_new_num_synapses) _synaptic_post.resize(_new_num_synapses) - _synaptic_pre[_cur_num_synapses:] = i - _synaptic_post[_cur_num_synapses:] = _cond_nonzero - + _synaptic_pre[_cur_num_synapses:] = _source_neurons[i] + _synaptic_post[_cur_num_synapses:] = _target_neurons[_cond_nonzero] _new_synapses = np.arange(_cur_num_synapses, _new_num_synapses) - _source_synapses = _pre_synaptic[i] + _source_synapses = _pre_synaptic[_source_neurons[i]] _cur_num_source_synapses = len(_source_synapses) _source_synapses.resize(_cur_num_source_synapses + _numnew) _source_synapses[_cur_num_source_synapses:] = _new_synapses for _new_synapse, _target in zip(_new_synapses, _cond_nonzero): - _target_synapses = _post_synaptic[_target] + _target_synapses = _post_synaptic[_target_neurons[_target]] _cur_num_target_synapses = len(_target_synapses) _target_synapses.resize(_cur_num_target_synapses + 1) _target_synapses[_cur_num_target_synapses] = _new_synapse diff --git a/brian2/codegen/runtime/numpy_rt/templates/threshold.py_ b/brian2/codegen/runtime/numpy_rt/templates/threshold.py_ index 0ade80eb3..c3540e04e 100644 --- a/brian2/codegen/runtime/numpy_rt/templates/threshold.py_ +++ b/brian2/codegen/runtime/numpy_rt/templates/threshold.py_ @@ -1,4 +1,5 @@ -# USE_SPECIFIERS { not_refractory, lastspike, t } +# USES_VARIABLES { not_refractory, lastspike, t } +# ITERATE_ALL { _idx } {% for line in code_lines %} {{line}} diff --git a/brian2/codegen/runtime/weave_rt/templates/lumped_variable.cpp b/brian2/codegen/runtime/weave_rt/templates/lumped_variable.cpp index d5cc70d1a..8c9c094c9 100644 --- a/brian2/codegen/runtime/weave_rt/templates/lumped_variable.cpp +++ b/brian2/codegen/runtime/weave_rt/templates/lumped_variable.cpp @@ -3,7 +3,7 @@ {% macro main() %} - // USE_SPECIFIERS { _synaptic_post, _synaptic_pre, _num_target_neurons } + // USES_VARIABLES { _synaptic_post, _synaptic_pre, _num_target_neurons } ////// HANDLE DENORMALS /// {% for line in denormals_code_lines %} @@ -27,10 +27,10 @@ _ptr{{_target_var_array}}[_target_idx] = 0.0; // A bit confusing: The "neuron" index here refers to the synapses! 
- for(int _neuron_idx=0; _neuron_idx<_num_synaptic_post; _neuron_idx++) + for(int _idx=0; _idx<_num_synaptic_post; _idx++) { - const int _postsynaptic_idx = _synaptic_post[_neuron_idx]; - const int _presynaptic_idx = _synaptic_pre[_neuron_idx]; + const int _postsynaptic_idx = _synaptic_post[_idx]; + const int _presynaptic_idx = _synaptic_pre[_idx]; {% for line in code_lines %} {{line}} {% endfor %} diff --git a/brian2/codegen/runtime/weave_rt/templates/ratemonitor.cpp b/brian2/codegen/runtime/weave_rt/templates/ratemonitor.cpp index f2dd28133..9d43f00c1 100644 --- a/brian2/codegen/runtime/weave_rt/templates/ratemonitor.cpp +++ b/brian2/codegen/runtime/weave_rt/templates/ratemonitor.cpp @@ -1,6 +1,6 @@ {% macro main() %} - // USE_SPECIFIERS { _t, _rate, t, dt, _spikes } + // USES_VARIABLES { _t, _rate, t, dt, _spikes } // Calculate the new length for the arrays const npy_int _new_len = (npy_int)(_t.attr("shape")[0]) + 1; diff --git a/brian2/codegen/runtime/weave_rt/templates/reset.cpp b/brian2/codegen/runtime/weave_rt/templates/reset.cpp index 6ce14e91d..d413b53bb 100644 --- a/brian2/codegen/runtime/weave_rt/templates/reset.cpp +++ b/brian2/codegen/runtime/weave_rt/templates/reset.cpp @@ -2,7 +2,7 @@ //// MAIN CODE ///////////////////////////////////////////////////////////// {% macro main() %} - // USE_SPECIFIERS { _spikes } + // USES_VARIABLES { _spikes } ////// HANDLE DENORMALS /// {% for line in denormals_code_lines %} @@ -20,10 +20,10 @@ {% endfor %} //// MAIN CODE //////////// - for(int _index__spikes=0; _index__spikes<_num_spikes; _index__spikes++) + for(int _index_spikes=0; _index_spikes<_num_spikes; _index_spikes++) { - const int _neuron_idx = _spikes[_index__spikes]; - const int _vectorisation_idx = _neuron_idx; + const int _idx = _spikes[_index_spikes]; + const int _vectorisation_idx = _idx; {% for line in code_lines %} {{line}} {% endfor %} diff --git a/brian2/codegen/runtime/weave_rt/templates/spikemonitor.cpp b/brian2/codegen/runtime/weave_rt/templates/spikemonitor.cpp index f54c40478..d580b98b8 100644 --- a/brian2/codegen/runtime/weave_rt/templates/spikemonitor.cpp +++ b/brian2/codegen/runtime/weave_rt/templates/spikemonitor.cpp @@ -1,28 +1,55 @@ {% macro main() %} - // USE_SPECIFIERS { _t, _i, t, _spikes, _count } + // USES_VARIABLES { _t, _i, t, _spikes, _count, + // _source_start, _source_end} if (_num_spikes > 0) { - // Get the current length and new length of t and i arrays - const int _curlen = _t.attr("shape")[0]; - const int _newlen = _curlen + _num_spikes; - // Resize the arrays - py::tuple _newlen_tuple(1); - _newlen_tuple[0] = _newlen; - _t.mcall("resize", _newlen_tuple); - _i.mcall("resize", _newlen_tuple); - // Get the potentially newly created underlying data arrays - double *_t_data = (double*)(((PyArrayObject*)(PyObject*)_t.attr("data"))->data); - // TODO: How to get the correct datatype automatically here? - npy_int64 *_i_data = (npy_int64*)(((PyArrayObject*)(PyObject*)_i.attr("data"))->data); - // Copy the values across - for(int _idx=0; _idx<_num_spikes; _idx++) + // For subgroups, we do not want to record all spikes + // We assume that spikes are ordered + // TODO: Will this assumption ever be violated? 
+ int _start_idx = 0; + int _end_idx = - 1; + for(int _i=0; _i<_num_spikes; _i++) { - const int _neuron_idx = _spikes[_idx]; - _t_data[_curlen + _idx] = t; - _i_data[_curlen + _idx] = _neuron_idx; - _count[_neuron_idx]++; + const int _idx = _spikes[_i]; + if (_idx >= _source_start) { + _start_idx = _i; + break; + } + } + for(int _i=_start_idx; _i<_num_spikes; _i++) + { + const int _idx = _spikes[_i]; + if (_idx >= _source_end) { + _end_idx = _i; + break; + } + } + if (_end_idx == -1) + _end_idx =_num_spikes; + _num_spikes = _end_idx - _start_idx; + if (_num_spikes > 0) { + // Get the current length and new length of t and i arrays + const int _curlen = _t.attr("shape")[0]; + const int _newlen = _curlen + _num_spikes; + // Resize the arrays + py::tuple _newlen_tuple(1); + _newlen_tuple[0] = _newlen; + _t.mcall("resize", _newlen_tuple); + _i.mcall("resize", _newlen_tuple); + // Get the potentially newly created underlying data arrays + double *_t_data = (double*)(((PyArrayObject*)(PyObject*)_t.attr("data"))->data); + // TODO: How to get the correct datatype automatically here? + npy_int64 *_i_data = (npy_int64*)(((PyArrayObject*)(PyObject*)_i.attr("data"))->data); + // Copy the values across + for(int _i=_start_idx; _i<_end_idx; _i++) + { + const int _idx = _spikes[_i]; + _t_data[_curlen + _i - _start_idx] = t; + _i_data[_curlen + _i - _start_idx] = _idx - _source_start; + _count[_idx - _source_start]++; + } } } {% endmacro %} diff --git a/brian2/codegen/runtime/weave_rt/templates/state_variable_indexing.cpp b/brian2/codegen/runtime/weave_rt/templates/state_variable_indexing.cpp index b278d1efa..19706093b 100644 --- a/brian2/codegen/runtime/weave_rt/templates/state_variable_indexing.cpp +++ b/brian2/codegen/runtime/weave_rt/templates/state_variable_indexing.cpp @@ -2,7 +2,6 @@ //// MAIN CODE ///////////////////////////////////////////////////////////// {% macro main() %} - // USE_SPECIFIERS { _num_neurons } ////// SUPPORT CODE /////// {% for line in support_code_lines %} // {{line}} @@ -26,15 +25,15 @@ //// MAIN CODE //////////// int _cpp_numelements = 0; // Container for all the potential indices - npy_int *_elements = (npy_int *)malloc(sizeof(npy_int) * _num_neurons); - for(int _neuron_idx=0; _neuron_idx<_num_neurons; _neuron_idx++) + npy_int *_elements = (npy_int *)malloc(sizeof(npy_int) * _num_idx); + for(int _idx=0; _idx<_num_idx; _idx++) { - const int _vectorisation_idx = _neuron_idx; + const int _vectorisation_idx = _idx; {% for line in code_lines %} {{line}} {% endfor %} if(_cond) { - _elements[_cpp_numelements++] = _neuron_idx; + _elements[_cpp_numelements++] = _idx; } } npy_intp _dims[] = {_cpp_numelements}; diff --git a/brian2/codegen/runtime/weave_rt/templates/statemonitor.cpp b/brian2/codegen/runtime/weave_rt/templates/statemonitor.cpp index 0dbeb8d04..dc3336b91 100644 --- a/brian2/codegen/runtime/weave_rt/templates/statemonitor.cpp +++ b/brian2/codegen/runtime/weave_rt/templates/statemonitor.cpp @@ -1,6 +1,6 @@ {% macro main() %} - // USE_SPECIFIERS { _t, _clock_t, _indices } + // USES_VARIABLES { _t, _clock_t, _indices } ////// SUPPORT CODE /// {% for line in support_code_lines %} @@ -42,16 +42,16 @@ { PyArrayObject *_record_data = (((PyArrayObject*)(PyObject*)_recorded_{{_varname}}.attr("data"))); const npy_intp* _record_strides = _record_data->strides; - for (int _idx=0; _idx < _num_indices; _idx++) + for (int _i = 0; _i < _num_indices; _i++) { - const int _neuron_idx = _indices[_idx]; - const int _vectorisation_idx = _neuron_idx; + const int _idx = _indices[_i]; + const int 
_vectorisation_idx = _idx; {% for line in code_lines %} {{line}} {% endfor %} // FIXME: This will not work for variables with other data types - double *recorded_entry = (double*)(_record_data->data + (_new_len - 1)*_record_strides[0] + _idx*_record_strides[1]); + double *recorded_entry = (double*)(_record_data->data + (_new_len - 1)*_record_strides[0] + _i*_record_strides[1]); *recorded_entry = _to_record_{{_varname}}; } } diff --git a/brian2/codegen/runtime/weave_rt/templates/stateupdate.cpp b/brian2/codegen/runtime/weave_rt/templates/stateupdate.cpp index bc985a017..6c08106b8 100644 --- a/brian2/codegen/runtime/weave_rt/templates/stateupdate.cpp +++ b/brian2/codegen/runtime/weave_rt/templates/stateupdate.cpp @@ -2,7 +2,6 @@ //// MAIN CODE ///////////////////////////////////////////////////////////// {% macro main() %} - // USE_SPECIFIERS { _num_neurons } ////// SUPPORT CODE /// {% for line in support_code_lines %} @@ -25,9 +24,9 @@ {% endfor %} //// MAIN CODE //////////// - for(int _neuron_idx=0; _neuron_idx<_num_neurons; _neuron_idx++) + for(int _idx=0; _idx<_num_idx; _idx++) { - const int _vectorisation_idx = _neuron_idx; + const int _vectorisation_idx = _idx; {% for line in code_lines %} {{line}} {% endfor %} diff --git a/brian2/codegen/runtime/weave_rt/templates/synapses.cpp b/brian2/codegen/runtime/weave_rt/templates/synapses.cpp index 07615828d..eab99f972 100644 --- a/brian2/codegen/runtime/weave_rt/templates/synapses.cpp +++ b/brian2/codegen/runtime/weave_rt/templates/synapses.cpp @@ -2,7 +2,8 @@ //// MAIN CODE ///////////////////////////////////////////////////////////// {% macro main() %} - // USE_SPECIFIERS { _spiking_synapses, _synaptic_pre,_synaptic_post } + // USES_VARIABLES { _spiking_synapses, _synaptic_pre, _synaptic_post, + // _source_offset, _target_offset} //// SUPPORT CODE ////////////////////////////////////////////////////////// {% for line in support_code_lines %} @@ -29,10 +30,10 @@ _spiking_synapse_idx<_num_spiking_synapses; _spiking_synapse_idx++) { - const int _neuron_idx = _spiking_synapses[_spiking_synapse_idx]; - const int _postsynaptic_idx = _synaptic_post[_neuron_idx]; - const int _presynaptic_idx = _synaptic_pre[_neuron_idx]; - const int _vectorisation_idx = _neuron_idx; + const int _idx = _spiking_synapses[_spiking_synapse_idx]; + const int _postsynaptic_idx = _synaptic_post[_idx] + _target_offset; + const int _presynaptic_idx = _synaptic_pre[_idx] + _source_offset; + const int _vectorisation_idx = _idx; {% for line in code_lines %} {{line}} {% endfor %} diff --git a/brian2/codegen/runtime/weave_rt/templates/synapses_create.cpp b/brian2/codegen/runtime/weave_rt/templates/synapses_create.cpp index f60d4f335..49ba09187 100644 --- a/brian2/codegen/runtime/weave_rt/templates/synapses_create.cpp +++ b/brian2/codegen/runtime/weave_rt/templates/synapses_create.cpp @@ -1,6 +1,6 @@ {% macro main() %} - // USE_SPECIFIERS { _synaptic_pre, _synaptic_post, _post_synaptic, - // _pre_synaptic, _num_source_neurons, _num_target_neurons, + // USES_VARIABLES { _synaptic_pre, _synaptic_post, _post_synaptic, + // _pre_synaptic, _source_neurons, _target_neurons, // rand} //// SUPPORT CODE ////////////////////////////////////////////////////////// @@ -51,8 +51,8 @@ } for (int _repetition=0; _repetition<_n; _repetition++) { - _prebuf[_curbuf] = i; - _postbuf[_curbuf] = j; + _prebuf[_curbuf] = _source_neurons[i]; + _postbuf[_curbuf] = _target_neurons[j]; _curbuf++; // Flush buffer if(_curbuf==_buffer_size) @@ -65,8 +65,8 @@ // mapping _synprebuf[0] = _synapse_idx; 
_synpostbuf[0] = _synapse_idx; - py::object _pre_synapses = (py::object)PyList_GetItem(_pre_synaptic, i); - py::object _post_synapses = (py::object)PyList_GetItem(_post_synaptic, j); + py::object _pre_synapses = (py::object)PyList_GetItem(_pre_synaptic, _source_neurons[i]); + py::object _post_synapses = (py::object)PyList_GetItem(_post_synaptic, _target_neurons[j]); _flush_buffer(_synprebuf, _pre_synapses, 1); _flush_buffer(_synpostbuf, _post_synapses, 1); _synapse_idx++; diff --git a/brian2/codegen/runtime/weave_rt/templates/threshold.cpp b/brian2/codegen/runtime/weave_rt/templates/threshold.cpp index eb9b2432b..6066a6827 100644 --- a/brian2/codegen/runtime/weave_rt/templates/threshold.cpp +++ b/brian2/codegen/runtime/weave_rt/templates/threshold.cpp @@ -2,7 +2,7 @@ //// MAIN CODE ///////////////////////////////////////////////////////////// {% macro main() %} - // USE_SPECIFIERS { _num_neurons, not_refractory, lastspike, t } + // USES_VARIABLES { not_refractory, lastspike, t } ////// SUPPORT CODE /////// {% for line in support_code_lines %} // {{line}} @@ -25,20 +25,20 @@ //// MAIN CODE //////////// int _cpp_numspikes = 0; - npy_int32 *_spikes_space = (npy_int32 *)malloc(sizeof(npy_int32) * _num_neurons); - for(int _neuron_idx=0; _neuron_idx<_num_neurons; _neuron_idx++) + npy_int32 *_spikes_space = (npy_int32 *)malloc(sizeof(npy_int32) * _num_idx); + for(int _idx=0; _idx<_num_idx; _idx++) { - const int _vectorisation_idx = _neuron_idx; + const int _vectorisation_idx = _idx; {% for line in code_lines %} {{line}} {% endfor %} if(_cond) { - _spikes_space[_cpp_numspikes++] = _neuron_idx; + _spikes_space[_cpp_numspikes++] = _idx; // We have to use the pointer names directly here: The condition // might contain references to not_refractory or lastspike and in // that case the names will refer to a single entry. 
- _ptr{{_array_not_refractory}}[_neuron_idx] = false; - _ptr{{_array_lastspike}}[_neuron_idx] = t; + _ptr{{_array_not_refractory}}[_idx] = false; + _ptr{{_array_lastspike}}[_idx] = t; } } npy_intp _dims[] = {_cpp_numspikes}; diff --git a/brian2/codegen/runtime/weave_rt/weave_rt.py b/brian2/codegen/runtime/weave_rt/weave_rt.py index 6287c554d..7f22ddf2b 100644 --- a/brian2/codegen/runtime/weave_rt/weave_rt.py +++ b/brian2/codegen/runtime/weave_rt/weave_rt.py @@ -47,8 +47,8 @@ class WeaveCodeObject(CodeObject): 'templates')) language = CPPLanguage() - def __init__(self, code, namespace, specifiers): - super(WeaveCodeObject, self).__init__(code, namespace, specifiers) + def __init__(self, code, namespace, variables): + super(WeaveCodeObject, self).__init__(code, namespace, variables) self.compiler = brian_prefs['codegen.runtime.weave.compiler'] self.extra_compile_args = brian_prefs['codegen.runtime.weave.extra_compile_args'] diff --git a/brian2/codegen/templates.py b/brian2/codegen/templates.py index a2210ad88..3e8517508 100644 --- a/brian2/codegen/templates.py +++ b/brian2/codegen/templates.py @@ -36,14 +36,21 @@ def __init__(self, template): self.words = set([]) for v in temps: self.words.update(get_identifiers(v)) - #: The set of specifiers in this template - self.specifiers = set([]) + #: The set of variables in this template + self.variables = set([]) + #: The indices over which the template iterates completely + self.iterate_all = set([]) for v in temps: - # This is the bit inside {} for USE_SPECIFIERS { list of words } - specifier_blocks = re.findall(r'\bUSE_SPECIFIERS\b\s*\{(.*?)\}', + # This is the bit inside {} for USES_VARIABLES { list of words } + specifier_blocks = re.findall(r'\bUSES_VARIABLES\b\s*\{(.*?)\}', v, re.M|re.S) + # Same for ITERATE_ALL + iterate_all_blocks = re.findall(r'\bITERATE_ALL\b\s*\{(.*?)\}', + v, re.M|re.S) for block in specifier_blocks: - self.specifiers.update(get_identifiers(block)) + self.variables.update(get_identifiers(block)) + for block in iterate_all_blocks: + self.iterate_all.update(get_identifiers(block)) def __call__(self, code_lines, **kwds): kwds['code_lines'] = code_lines diff --git a/brian2/codegen/translation.py b/brian2/codegen/translation.py index eb25d5f6a..911f6844b 100644 --- a/brian2/codegen/translation.py +++ b/brian2/codegen/translation.py @@ -19,8 +19,8 @@ from numpy import float64 -from brian2.core.specifiers import Value, ArrayVariable, Subexpression, Index -from brian2.utils.stringtools import (deindent, strip_empty_lines, indent, +from brian2.core.variables import Variable, Subexpression +from brian2.utils.stringtools import (deindent, strip_empty_lines, get_identifiers) from .statements import Statement @@ -44,7 +44,7 @@ def __init__(self, **kwds): STANDARD_IDENTIFIERS = set(['and', 'or', 'not', 'True', 'False']) -def analyse_identifiers(code, specifiers, recursive=False): +def analyse_identifiers(code, variables, recursive=False): ''' Analyses a code string (sequence of statements) to find all identifiers by type. @@ -60,7 +60,7 @@ def analyse_identifiers(code, specifiers, recursive=False): ---------- code : str The code string, a sequence of statements one per line. - specifiers : dict of `Specifier`, set of names + variables : dict of `Variable`, set of names Specifiers for the model variables or a set of known names recursive : bool, optional Whether to recurse down into subexpressions (defaults to ``False``). @@ -77,19 +77,19 @@ def analyse_identifiers(code, specifiers, recursive=False): it and not previously known. 
Should correspond to variables in the external namespace. ''' - if isinstance(specifiers, collections.Mapping): - known = set(specifiers.keys()) + if isinstance(variables, collections.Mapping): + known = set(variables.keys()) else: - known = set(specifiers) - specifiers = dict((k, Value(k, 1, float64)) for k in known) + known = set(variables) + variables = dict((k, Variable(unit=None)) for k in known) known |= STANDARD_IDENTIFIERS - stmts = make_statements(code, specifiers, float64) + stmts = make_statements(code, variables, float64) defined = set(stmt.var for stmt in stmts if stmt.op==':=') if recursive: - if not isinstance(specifiers, collections.Mapping): - raise TypeError('Have to specify a specifiers dictionary.') - allids = get_identifiers_recursively(code, specifiers) + if not isinstance(variables, collections.Mapping): + raise TypeError('Have to specify a variables dictionary.') + allids = get_identifiers_recursively(code, variables) else: allids = get_identifiers(code) dependent = allids.difference(defined, known) @@ -98,20 +98,20 @@ def analyse_identifiers(code, specifiers, recursive=False): return defined, used_known, dependent -def get_identifiers_recursively(expr, specifiers): +def get_identifiers_recursively(expr, variables): ''' Gets all the identifiers in a code, recursing down into subexpressions. ''' identifiers = get_identifiers(expr) for name in set(identifiers): - if name in specifiers and isinstance(specifiers[name], Subexpression): - s_identifiers = get_identifiers_recursively(specifiers[name].expr, - specifiers) + if name in variables and isinstance(variables[name], Subexpression): + s_identifiers = get_identifiers_recursively(variables[name].expr, + variables) identifiers |= s_identifiers return identifiers -def make_statements(code, specifiers, dtype): +def make_statements(code, variables, dtype): ''' Turn a series of abstract code statements into Statement objects, inferring whether each line is a set/declare operation, whether the variables are @@ -125,11 +125,10 @@ def make_statements(code, specifiers, dtype): if DEBUG: print 'INPUT CODE:' print code - dtypes = dict((name, value.dtype) for name, value in specifiers.items() if hasattr(value, 'dtype')) + dtypes = dict((name, var.dtype) for name, var in variables.iteritems()) # we will do inference to work out which lines are := and which are = - #defined = set(specifiers.keys()) # variables which are already defined - defined = set(var for var, spec in specifiers.items() - if hasattr(spec, 'get_value')) + defined = set(variables.keys()) + for line in lines: # parse statement into "var op expr" var, op, expr = parse_statement(line.code) @@ -143,7 +142,7 @@ def make_statements(code, specifiers, dtype): # for each line will give the variable being written to line.write = var # each line will give a set of variables which are read - line.read = get_identifiers_recursively(expr, specifiers) + line.read = get_identifiers_recursively(expr, variables) if DEBUG: print 'PARSED STATEMENTS:' @@ -181,7 +180,7 @@ def make_statements(code, specifiers, dtype): # as invalid, and are invalidated whenever one of the variables appearing # in the RHS changes value. 
#subexpressions = get_all_subexpressions() - subexpressions = dict((name, val) for name, val in specifiers.items() if isinstance(val, Subexpression)) + subexpressions = dict((name, val) for name, val in variables.items() if isinstance(val, Subexpression)) if DEBUG: print 'SUBEXPRESSIONS:', subexpressions.keys() statements = [] @@ -239,15 +238,16 @@ def make_statements(code, specifiers, dtype): return statements -def translate(code, specifiers, namespace, dtype, language, indices): +def translate(code, variables, namespace, dtype, language, + variable_indices, iterate_all): ''' Translates an abstract code block into the target language. ``code`` The abstract code block, a series of one-line statements. - ``specifiers`` + ``variables`` A dict of ``(var, spec)`` where ``var`` is a variable name whose type - is specified by ``spec``, a `Specifier` object. These include + is specified by ``spec``, a `Variable` object. These include `Value` for a single (non-vector) value that will be inserted into the namespace at runtime, `Function` for a function, `ArrayVariable` for a value coming from an array of values, @@ -255,7 +255,7 @@ def translate(code, specifiers, namespace, dtype, language, indices): `Subexpression` for a common subexpression used in the code. There should only be a single `Index` specifier, and the name should correspond to that given in the `ArrayVariable` - specifiers. + variables. ``dtype`` The default dtype for newly created variables (usually float64). ``language`` @@ -263,52 +263,7 @@ def translate(code, specifiers, namespace, dtype, language, indices): Returns a multi-line string. ''' - statements = make_statements(code, specifiers, dtype) - return language.translate_statement_sequence(statements, specifiers, namespace, indices) - - -if __name__=='__main__': - from numpy import float64 - from languages import CLanguage, PythonLanguage, NumexprPythonLanguage - DEBUG = True - # switch between these two to invalidate x on the last line - if 1: - # x invalid on last line - code = ''' - _tmp_V = x - I += 1 - V += _tmp_V*x*dt - _cond = V>x - ''' - else: - # x valid on last line - code = ''' - _tmp_V = x - V += _tmp_V*x*dt - ''' - specifiers = { - 'V':ArrayVariable('_array_V', '_neuron_idx', float64), - 'I':ArrayVariable('_array_I', '_neuron_idx', float64), - 'x':Subexpression('-(V+I)/tau'), - 'tau':Value(float64), - 'dt':Value(float64), - '_cond':OutputVariable(bool), - #'_neuron_idx':Index(), - '_neuron_idx':Index(all=False), - } - for lang in [ - CLanguage(), - PythonLanguage(), - NumexprPythonLanguage() - ]: - print lang.__class__.__name__ - print '='*len(lang.__class__.__name__) - output = translate(code, specifiers, float64, lang) - print 'OUTPUT CODE:' - if isinstance(output, str): - print indent(output) - else: - for k, v in output.items(): - print k+':' - print indent(v) - print + statements = make_statements(code, variables, dtype) + return language.translate_statement_sequence(statements, variables, + namespace, variable_indices, + iterate_all) diff --git a/brian2/core/namespace.py b/brian2/core/namespace.py index 6d7bd467f..92d1ed409 100644 --- a/brian2/core/namespace.py +++ b/brian2/core/namespace.py @@ -99,7 +99,7 @@ def _same_function(func1, func2): ''' Helper function, used during namespace resolution for comparing wether to functions are the same. This takes care of treating a function and a - `Function` specifiers whose `Function.pyfunc` attribute matches as the + `Function` variables whose `Function.pyfunc` attribute matches as the same. 
This prevents the user from getting spurious warnings when having for example a numpy function such as :np:func:`~random.randn` in the local namespace, while the ``randn`` symbol in the numpy namespace used for the diff --git a/brian2/core/specifiers.py b/brian2/core/specifiers.py deleted file mode 100644 index 2326fcb53..000000000 --- a/brian2/core/specifiers.py +++ /dev/null @@ -1,617 +0,0 @@ -''' -Classes used to specify the type of a function, variable or common sub-expression - -TODO: have a single global dtype rather than specify for each variable? -''' -import numpy as np - -from brian2.units.allunits import second - -from brian2.utils.stringtools import get_identifiers -from brian2.units.fundamentalunits import (Quantity, Unit, is_scalar_type, - fail_for_dimension_mismatch, - have_same_dimensions) - -__all__ = ['Specifier', - 'VariableSpecifier', - 'Value', - 'ReadOnlyValue', - 'StochasticVariable', - 'AttributeValue' - 'ArrayVariable', - 'Subexpression', - 'Index', - ] - - -############################################################################### -# Parent classes -############################################################################### -class Specifier(object): - ''' - An object providing information about parts of a model (e.g. variables). - `Specifier` objects are used both to store the information within the model - (and allow for things like unit checking) and are passed on to code - generation to specify properties like the dtype. - - This class is only used as a parent class for more concrete specifiers. - - Parameters - ---------- - name : str - The name of the specifier (e.g. the name of the model variable) - ''' - - def __init__(self, name): - #: The name of the thing being specified (e.g. the model variable) - self.name = name - - def __repr__(self): - return '%s(name=%r)' % (self.__class__.__name__, self.name) - - -class VariableSpecifier(Specifier): - ''' - An object providing information about model variables (including implicit - variables such as ``t`` or ``xi``). - - Parameters - ---------- - name : str - The name of the variable. - unit : `Unit` - The unit of the variable - scalar : bool, optional - Whether the variable is a scalar value (``True``) or vector-valued, i.e. - defined for every neuron (``False``). Defaults to ``True``. - constant: bool, optional - Whether the value of this variable can change during a run. Defaults - to ``False``. - is_bool: bool, optional - Whether this is a boolean variable (also implies it is dimensionless). - Defaults to ``False`` - See Also - -------- - Value - ''' - def __init__(self, name, unit, scalar=True, constant=False, is_bool=False): - Specifier.__init__(self, name) - - #: The variable's unit. - self.unit = unit - - #: Whether this is a boolean variable - self.is_bool = is_bool - - if is_bool: - if not have_same_dimensions(unit, 1): - raise ValueError('Boolean variables can only be dimensionless') - - #: Whether the value is a scalar - self.scalar = scalar - - #: Whether the value is constant during a run - self.constant = constant - - - def __repr__(self): - description = ('{classname}(name={name}, unit={unit}, scalar={scalar}, ' - 'constant={constant})') - return description.format(classname=self.__class__.__name__, - name=repr(self.name), - unit=repr(self.unit), - scalar=repr(self.scalar), - constant=repr(self.constant)) - -class Value(VariableSpecifier): - ''' - An object providing information about model variables that have an - associated value in the model. 
- - Some variables, for example stochastic variables, are not stored anywhere - in the model itself. They would therefore be represented by a specifier - that is *not* derived from `Value` but from `VariableSpecifier`. - - Parameters - ---------- - name : str - The name of the variable. - unit : `Unit` - The unit of the variable - dtype: `numpy.dtype` - The dtype used for storing the variable. - scalar : bool, optional - Whether the variable is a scalar value (``True``) or vector-valued, i.e. - defined for every neuron (``False``). Defaults to ``True``. - constant: bool, optional - Whether the value of this variable can change during a run. Defaults - to ``False``. - is_bool: bool, optional - Whether this is a boolean variable (also implies it is dimensionless). - Defaults to ``False`` - - ''' - def __init__(self, name, unit, dtype, scalar=True, constant=False, - is_bool=False): - VariableSpecifier.__init__(self, name, unit, scalar, constant, is_bool) - #: The dtype used for storing the variable. - self.dtype = dtype - - def get_value(self): - ''' - Return the value associated with the variable. - ''' - raise NotImplementedError() - - def get_value_with_unit(self): - return Quantity(self.get_value(), self.unit.dimensions) - - def get_addressable_value(self, level=0): - return self.get_value() - - def get_addressable_value_with_unit(self, level=0): - return self.get_value_with_unit() - - def get_len(self): - if self.scalar: - return 0 - else: - return len(self.get_value()) - - def set_value(self): - ''' - Set the value associated with the variable. - ''' - raise NotImplementedError() - - def __repr__(self): - description = ('{classname}(name={name}, unit={unit}, dtype={dtype}, ' - 'scalar={scalar}, constant={constant})') - return description.format(classname=self.__class__.__name__, - name=repr(self.name), - unit=repr(self.unit), - dtype=repr(self.dtype), - scalar=repr(self.scalar), - constant=repr(self.constant)) - -############################################################################### -# Concrete classes that are used as specifiers in practice. -############################################################################### - -class ReadOnlyValue(Value): - ''' - An object providing information about a model variable that can only be - read (e.g. the length of a `NeuronGroup`). It is assumed that the value - does never change, for changing values use `AttributeValue` instead. - - Parameters - ---------- - name : str - The name of the variable. - unit : `Unit` - The unit of the variable - dtype: `numpy.dtype` - The dtype used for storing the variable. - value : reference to a value of type `dtype` - Reference to the variable's value - - Raises - ------ - TypeError - When trying to use the `set_value` method. 
- ''' - def __init__(self, name, unit, dtype, value): - #: Reference to the variable's value - self.value = value - - scalar = is_scalar_type(value) - - is_bool = value is True or value is False - - Value.__init__(self, name, unit, dtype, scalar, constant=True, - is_bool=is_bool) - - def get_value(self): - return self.value - - def set_value(self): - raise TypeError('The value "%s" is read-only' % self.name) - - def __repr__(self): - description = ('{classname}(name={name}, unit={unit}, dtype={dtype}, ' - 'value={value}') - return description.format(classname=self.__class__.__name__, - name=repr(self.name), - unit=repr(self.unit), - dtype=repr(self.dtype), - value=repr(self.value)) - - -class StochasticVariable(VariableSpecifier): - ''' - An object providing information about a stochastic variable. Automatically - sets the unit to ``second**-.5``. - - Parameters - ---------- - name : str - The name of the stochastic variable. - ''' - def __init__(self, name): - # The units of stochastic variables is fixed - VariableSpecifier.__init__(self, name, second**(-.5), scalar=False) - - -class AttributeValue(ReadOnlyValue): - ''' - An object providing information about a value saved as an attribute of an - object. Instead of saving a reference to the value itself, we save the - name of the attribute. This way, we get the correct value if the attribute - is overwritten with a new value (e.g. in the case of ``clock.t_``) - - The object value has to be accessible by doing ``getattr(obj, attribute)``. - - Parameters - ---------- - name : str - The name of the variable. - unit : `Unit` - The unit of the variable - dtype: `numpy.dtype` - The dtype used for storing the variable. - obj : object - The object storing the variable's value (e.g. a `NeuronGroup`). - attribute : str - The name of the attribute storing the variable's value. `attribute` has - to be an attribute of `obj`. - constant : bool, optional - Whether the attribute's value is constant during a run. - is_bool: bool, optional - Whether this is a boolean variable (also implies it is dimensionless). - Defaults to ``False`` - Raises - ------ - AttributeError - If `obj` does not have an attribute `attribute`. 
- - ''' - def __init__(self, name, unit, dtype, obj, attribute, constant=False, - is_bool=False): - if not hasattr(obj, attribute): - raise AttributeError(('Object %r does not have an attribute %r, ' - 'providing the value for %r') % - (obj, attribute, name)) - - value = getattr(obj, attribute) - scalar = is_scalar_type(value) - - is_bool = value is True or value is False - - Value.__init__(self, name, unit, dtype, scalar, constant, is_bool) - #: A reference to the object storing the variable's value - self.obj = obj - #: The name of the attribute storing the variable's value - self.attribute = attribute - - def get_value(self): - return getattr(self.obj, self.attribute) - - def __repr__(self): - description = ('{classname}(name={name}, unit={unit}, dtype={dtype}, ' - 'obj={obj}, attribute={attribute}, constant={constant})') - return description.format(classname=self.__class__.__name__, - name=repr(self.name), - unit=repr(self.unit), - dtype=repr(self.dtype), - obj=repr(self.obj), - attribute=repr(self.attribute), - constant=repr(self.constant)) - - -class VariableView(object): - - def __init__(self, specifier, group, unit=None, level=0): - self.specifier = specifier - self.group = group - self.unit = unit - self.level = level - -# data = property(lambda self: self.specifier.get_value()) - - def __getitem__(self, i): - spec = self.specifier - if spec.scalar: - if not (i == slice(None) or i == 0 or (hasattr(i, '__len__') and len(i) == 0)): - print 'index', repr(i) - raise IndexError('Variable %s is a scalar variable.' % spec.name) - indices = 0 - else: - indices = self.group.indices[i] - if self.unit is None or have_same_dimensions(self.unit, Unit(1)): - return spec.get_value()[indices] - else: - return Quantity(spec.get_value()[indices], self.unit.dimensions) - - def __setitem__(self, i, value): - spec = self.specifier - if spec.scalar: - if not (i == slice(None) or i == 0 or (hasattr(i, '__len__') and len(i) == 0)): - raise IndexError('Variable %s is a scalar variable.' 
% spec.name) - indices = np.array([0]) - else: - indices = self.group.indices[i] - if isinstance(value, basestring): - check_units = self.unit is not None - self.group._set_with_code(spec, indices, value, - check_units, level=self.level + 1) - else: - if not self.unit is None: - fail_for_dimension_mismatch(value, self.unit) - self.specifier.array[indices] = value - - def __array__(self, dtype=None): - if dtype is not None and dtype != self.specifier.dtype: - raise NotImplementedError('Changing dtype not supported') - return self[:] - - def __add__(self, other): - return self[:] + other - - def __sub__(self, other): - return self[:] - other - - def __mul__(self, other): - return self[:] * other - - def __div__(self, other): - return self[:] / other - - def __iadd__(self, other): - if isinstance(other, basestring): - rhs = self.specifier.name + ' + ' + other - else: - rhs = self[:] + other - self[:] = rhs - return self - - def __isub__(self, other): - if isinstance(other, basestring): - rhs = self.specifier.name + ' - ' + other - else: - rhs = self[:] - other - self[:] = rhs - return self - - def __imul__(self, other): - if isinstance(other, basestring): - rhs = self.specifier.name + ' * (' + other + ')' - else: - rhs = self[:] * other - self[:] = rhs - return self - - def __idiv__(self, other): - if isinstance(other, basestring): - rhs = self.specifier.name + ' / (' + other + ')' - else: - rhs = self[:] / other - self[:] = rhs - return self - - def __repr__(self): - if self.unit is None or have_same_dimensions(self.unit, Unit(1)): - return '<%s.%s_: %r>' % (self.group.name, self.specifier.name, - self.specifier.get_value()) - else: - return '<%s.%s: %r>' % (self.group.name, self.specifier.name, - Quantity(self.specifier.get_value(), - self.unit.dimensions)) - - -class ArrayVariable(Value): - ''' - An object providing information about a model variable stored in an array - (for example, all state variables). The `index` will be used in the - generated code (at least in languages such as C++, where the code always - loops over arrays). Stores a reference to the array name used in the - generated code, constructed as ``'_array_'`` + ``name``. - - For example, for:: - - ``v = ArrayVariable('_array_v', volt, float64, group.arrays['v'], '_index')`` - - we would eventually produce C++ code that looked like:: - - double &v = _array_v[_index]; - - Parameters - ---------- - name : str - The name of the variable. - unit : `Unit` - The unit of the variable - dtype : `numpy.dtype` - The dtype used for storing the variable. - array : `numpy.array` - A reference to the array storing the data for the variable. - index : str - The index that will be used in the generated code when looping over the - variable. - constant : bool, optional - Whether the variable's value is constant during a run. - scalar : bool, optional - Whether this array is a 1-element array that should be treated like a - scalar (e.g. for a single delay value across synapses) - is_bool: bool, optional - Whether this is a boolean variable (also implies it is dimensionless). - Defaults to ``False`` - ''' - def __init__(self, name, unit, dtype, array, index, group=None, - constant=False, scalar=False, is_bool=False): - - self.group = group - - if is_bool: - if not dtype == np.bool: - raise ValueError(('Boolean variables have to be stored with ' - 'boolean dtype')) - Value.__init__(self, name, unit, dtype, scalar=scalar, - constant=constant, is_bool=is_bool) - #: The reference to the array storing the data for the variable. 
- self.array = array - #: The name for the array used in generated code - groupname = '_'+group.name+'_' if group is not None else '_' - self.arrayname = '_array' + groupname + self.name - #: The name of the index that will be used in the generated code. - self.index = index - - def get_value(self): - return self.array - - def set_value(self, value): - self.array[:] = value - - def get_addressable_value(self, level=0): - return VariableView(self, self.group, None, level) - - def get_addressable_value_with_unit(self, level=0): - return VariableView(self, self.group, self.unit, level) - - def __repr__(self): - description = ('<{classname}(name={name}, unit={unit}, dtype={dtype}, ' - 'array=<...>, index={index}, constant={constant})>') - return description.format(classname=self.__class__.__name__, - name=repr(self.name), - unit=repr(self.unit), - dtype=repr(self.dtype), - index=repr(self.index), - constant=self.constant) - - -class DynamicArrayVariable(ArrayVariable): - ''' - An object providing information about a model variable stored in a dynamic - array (used in synapses). - ''' - - def get_value(self): - # The actual numpy array is accesible via DynamicArray1D.data - return self.array.data - - -class SynapticArrayVariable(DynamicArrayVariable): - - def __init__(self, name, unit, dtype, array, index, synapses, - constant=False, is_bool=False): - ArrayVariable.__init__(self, name, unit, dtype, array, index, synapses, - constant=constant, is_bool=is_bool) - # Register the object with the `SynapticIndex` object so it gets - # automatically resized - synapses.indices.register_variable(self.array) - - def get_addressable_value(self, level=0): - return VariableView(self, self.group, None, level) - - def get_addressable_value_with_unit(self, level=0): - return VariableView(self, self.group, self.unit, level) - - -class Subexpression(Value): - ''' - An object providing information about a static equation in a model - definition, used as a hint in optimising. Can test if a variable is used - via ``var in spec``. The specifier is also able to return the result of - the expression (used in a `StateMonitor`, for example). - - Parameters - ---------- - name : str - The name of the static equation. - unit : `Unit` - The unit of the static equation - dtype : `numpy.dtype` - The dtype used for the expression. - expr : str - The expression defining the static equation. - specifiers : dict - The specifiers dictionary, containing specifiers for the - model variables used in the expression - namespace : dict - The namespace dictionary, containing identifiers for all the external - variables/functions used in the expression - is_bool: bool, optional - Whether this is a boolean variable (also implies it is dimensionless). - Defaults to ``False`` - ''' - def __init__(self, name, unit, dtype, expr, specifiers, namespace, - is_bool=False): - Value.__init__(self, name, unit, dtype, scalar=False, is_bool=is_bool) - #: The expression defining the static equation. - self.expr = expr.strip() - #: The identifiers used in the expression - self.identifiers = get_identifiers(expr) - #: Specifiers for the identifiers used in the expression - self.specifiers = specifiers - - #: The NeuronGroup's namespace for the identifiers used in the - #: expression - self.namespace = namespace - - #: An additional namespace provided by the run function (and updated - #: in `NeuronGroup.pre_run`) that is used if the NeuronGroup does not - #: have an explicitly defined namespace. 
- self.additional_namespace = None - - def get_value(self): - variable_values = {} - for identifier in self.identifiers: - if identifier in self.specifiers: - variable_values[identifier] = self.specifiers[identifier].get_value() - else: - variable_values[identifier] = self.namespace.resolve(identifier, - self.additional_namespace, - strip_units=True) - return eval(self.expr, variable_values) - - def __contains__(self, var): - return var in self.identifiers - - def __repr__(self): - description = ('<{classname}(name={name}, unit={unit}, dtype={dtype}, ' - 'expr={expr}, specifiers=<...>, namespace=<....>)>') - return description.format(classname=self.__class__.__name__, - name=repr(self.name), - unit=repr(self.unit), - dtype=repr(self.dtype), - expr=repr(self.expr)) - - -class Index(Specifier): - ''' - An object describing an index variable. You can specify ``iterate_all=True`` - or ``False`` to say whether it is varying over the whole of an input vector - or a subset. Vectorised langauges (i.e. Python) can use this to optimise the - reading and writing phase (i.e. you can do ``var = arr`` if - ``iterate_all==True`` but you need to ``var = whole[idx]`` if - ``iterate_all==False``). - - Parameters - ---------- - name : str - The name of the index. - iterate_all : bool, optional - Whether the index varies over the whole of an input vector (defaults to - ``True``). - ''' - def __init__(self, name, iterate_all=True): - Specifier.__init__(self, name) - if bool(iterate_all) != iterate_all: - raise ValueError(('The "all" argument has to be a bool, ' - 'is type %s instead' % type(all))) - #: Whether the index varies over the whole of an input vector - self.iterate_all = iterate_all - - def __repr__(self): - return '%s(name=%r, iterate_all=%r)' % (self.__class__.__name__, - self.name, - self.iterate_all) diff --git a/brian2/core/variables.py b/brian2/core/variables.py new file mode 100644 index 000000000..3461962d9 --- /dev/null +++ b/brian2/core/variables.py @@ -0,0 +1,451 @@ +''' +Classes used to specify the type of a function, variable or common sub-expression + +TODO: have a single global dtype rather than specify for each variable? +''' +import numpy as np + +from brian2.units.allunits import second + +from brian2.utils.stringtools import get_identifiers +from brian2.units.fundamentalunits import (Quantity, Unit, is_scalar_type, + fail_for_dimension_mismatch, + have_same_dimensions) + +__all__ = ['Variable', + 'StochasticVariable', + 'AttributeVariable', + 'ArrayVariable', + 'DynamicArrayVariable', + 'Subexpression', + ] + + +def get_dtype(obj): + if hasattr(obj, 'dtype'): + return obj.dtype + else: + return np.obj2sctype(obj) + + +class Variable(object): + ''' + An object providing information about model variables (including implicit + variables such as ``t`` or ``xi``). + + Parameters + ---------- + unit : `Unit` + The unit of the variable. Note that the variable itself (as referenced + by value) should never have units attached. + value: reference to the variable value, optional + Some variables (e.g. stochastic variables) don't have their value + stored anywhere, they'd pass ``None`` as a value. + dtype: `numpy.dtype`, optional + The dtype used for storing the variable. If none is given, tries to + get the dtype from the referred value. + scalar : bool, optional + Whether the variable is a scalar value (``True``) or vector-valued, e.g. + defined for every neuron (``False``). 
If nothing is specified,
+        determines the correct setting from the `value`, if that is not given
+        defaults to ``True``.
+    constant: bool, optional
+        Whether the value of this variable can change during a run. Defaults
+        to ``False``.
+    is_bool: bool, optional
+        Whether this is a boolean variable (also implies it is dimensionless).
+        If specified as ``None`` and a `value` is given, checks the value
+        itself. If no `value` is given, defaults to ``False``.
+    '''
+    def __init__(self, unit, value=None, dtype=None, scalar=None,
+                 constant=False, is_bool=None):
+
+        #: The variable's unit.
+        self.unit = unit
+
+        #: reference to a value of type `dtype`
+        self.value = value
+
+        if dtype is None:
+            self.dtype = get_dtype(value)
+        else:
+            value_dtype = get_dtype(value)
+            if value is not None and value_dtype != dtype:
+                raise TypeError(('Conflicting dtype information: '
+                                 'referred value has dtype %r, not '
+                                 '%r.') % (value_dtype, dtype))
+            #: The dtype used for storing the variable.
+            self.dtype = dtype
+
+        if is_bool is None:
+            if value is None:
+                self.is_bool = False
+            else:
+                self.is_bool = value is True or value is False
+        else:
+            #: Whether this variable is a boolean
+            self.is_bool = is_bool
+
+        if is_bool:
+            if not have_same_dimensions(unit, 1):
+                raise ValueError('Boolean variables can only be dimensionless')
+
+        if scalar is None:
+            if value is None:
+                self.scalar = True
+            else:
+                self.scalar = is_scalar_type(value)
+        else:
+            #: Whether the variable is a scalar
+            self.scalar = scalar
+
+        #: Whether the variable is constant during a run
+        self.constant = constant
+
+    def get_value(self):
+        '''
+        Return the value associated with the variable (without units).
+        '''
+        if self.value is None:
+            raise TypeError('Variable does not have a value')
+        else:
+            return self.value
+
+    def set_value(self):
+        '''
+        Set the value associated with the variable.
+        '''
+        raise NotImplementedError()
+
+    def get_value_with_unit(self):
+        '''
+        Return the value associated with the variable (with units).
+        '''
+        return Quantity(self.get_value(), self.unit.dimensions)
+
+    def get_addressable_value(self, level=0):
+        '''
+        Get the value associated with the variable (without units) that allows
+        for indexing
+        '''
+        return self.get_value()
+
+    def get_addressable_value_with_unit(self, level=0):
+        '''
+        Get the value associated with the variable (with units) that allows
+        for indexing
+        '''
+        return self.get_value_with_unit()
+
+    def get_len(self):
+        '''
+        Get the length of the value associated with the variable or ``0`` for
+        a scalar variable.
+        '''
+        if self.scalar:
+            return 0
+        else:
+            return len(self.get_value())
+
+    def __repr__(self):
+        description = ('<{classname}(unit={unit}, value={value}, '
+                       'dtype={dtype}, scalar={scalar}, constant={constant})>')
+        return description.format(classname=self.__class__.__name__,
+                                  unit=repr(self.unit),
+                                  value='<value of type %s>' % type(self.value),
+                                  dtype=repr(self.dtype),
+                                  scalar=repr(self.scalar),
+                                  constant=repr(self.constant))
+
+
+class StochasticVariable(Variable):
+    '''
+    An object providing information about a stochastic variable. Automatically
+    sets the unit to ``second**-.5``.
+
+    '''
+    def __init__(self):
+        # The units of stochastic variables are fixed
+        Variable.__init__(self, second**(-.5), dtype=np.float64,
+                          scalar=False, constant=False, is_bool=False)
+
+
+class AttributeVariable(Variable):
+    '''
+    An object providing information about a value saved as an attribute of an
+    object. Instead of saving a reference to the value itself, we save the
+    name of the attribute.
This way, we get the correct value if the attribute + is overwritten with a new value (e.g. in the case of ``clock.t_``) + + The object value has to be accessible by doing ``getattr(obj, attribute)``. + + Parameters + ---------- + unit : `Unit` + The unit of the variable + obj : object + The object storing the variable's value (e.g. a `NeuronGroup`). + attribute : str + The name of the attribute storing the variable's value. `attribute` has + to be an attribute of `obj`. + constant : bool, optional + Whether the attribute's value is constant during a run. Defaults to + ``False``. + Raises + ------ + AttributeError + If `obj` does not have an attribute `attribute`. + + ''' + def __init__(self, unit, obj, attribute, constant=False): + if not hasattr(obj, attribute): + raise AttributeError('Object %r does not have an attribute %r' % + (obj, attribute)) + + value = getattr(obj, attribute) + + Variable.__init__(self, unit, value, constant=constant) + #: A reference to the object storing the variable's value + self.obj = obj + #: The name of the attribute storing the variable's value + self.attribute = attribute + + def get_value(self): + return getattr(self.obj, self.attribute) + + def __repr__(self): + description = ('{classname}(unit={unit}, obj={obj}, ' + 'attribute={attribute}, constant={constant})') + return description.format(classname=self.__class__.__name__, + unit=repr(self.unit), + obj=repr(self.obj), + attribute=repr(self.attribute), + constant=repr(self.constant)) + + +class VariableView(object): + + def __init__(self, name, variable, group, unit=None, level=0): + self.name = name + self.variable = variable + self.group = group + self.unit = unit + self.level = level + + def __getitem__(self, i): + variable = self.variable + if variable.scalar: + if not (i == slice(None) or i == 0 or (hasattr(i, '__len__') and len(i) == 0)): + raise IndexError('Variable is a scalar variable.') + indices = 0 + else: + indices = self.group.indices[self.group.variable_indices[self.name]][i] + if self.unit is None or have_same_dimensions(self.unit, Unit(1)): + return variable.get_value()[indices] + else: + return Quantity(variable.get_value()[indices], self.unit.dimensions) + + def __setitem__(self, i, value): + variable = self.variable + if variable.scalar: + if not (i == slice(None) or i == 0 or (hasattr(i, '__len__') and len(i) == 0)): + raise IndexError('Variable is a scalar variable.') + indices = np.array([0]) + else: + indices = self.group.indices[self.group.variable_indices[self.name]][i] + if isinstance(value, basestring): + check_units = self.unit is not None + self.group._set_with_code(variable, indices, value, + check_units, level=self.level + 1) + else: + if not self.unit is None: + fail_for_dimension_mismatch(value, self.unit) + variable.value[indices] = value + + def __array__(self, dtype=None): + if dtype is not None and dtype != self.variable.dtype: + raise NotImplementedError('Changing dtype not supported') + return self[:] + + def __add__(self, other): + return self[:] + other + + def __sub__(self, other): + return self[:] - other + + def __mul__(self, other): + return self[:] * other + + def __div__(self, other): + return self[:] / other + + def __iadd__(self, other): + if isinstance(other, basestring): + raise TypeError(('In-place modification with strings not ' + 'supported. 
Use group.var = "var + expression" ' + 'instead of group.var += "expression".')) + else: + rhs = self[:] + other + self[:] = rhs + return self + + def __isub__(self, other): + if isinstance(other, basestring): + raise TypeError(('In-place modification with strings not ' + 'supported. Use group.var = "var - expression" ' + 'instead of group.var -= "expression".')) + else: + rhs = self[:] - other + self[:] = rhs + return self + + def __imul__(self, other): + if isinstance(other, basestring): + raise TypeError(('In-place modification with strings not ' + 'supported. Use group.var = "var * expression" ' + 'instead of group.var *= "expression".')) + else: + rhs = self[:] * other + self[:] = rhs + return self + + def __idiv__(self, other): + if isinstance(other, basestring): + raise TypeError(('In-place modification with strings not ' + 'supported. Use group.var = "var / expression" ' + 'instead of group.var /= "expression".')) + else: + rhs = self[:] / other + self[:] = rhs + return self + + def __repr__(self): + varname = self.name + if self.unit is None: + varname += '_' + return '<%s.%s: %r>' % (self.group.name, varname, + self[:]) + + +class ArrayVariable(Variable): + ''' + An object providing information about a model variable stored in an array + (for example, all state variables). + + Parameters + ---------- + name : str + The name of the variable. + unit : `Unit` + The unit of the variable + value : `numpy.ndarray` + A reference to the array storing the data for the variable. + group_name : str, optional + The name of the group to which this variable belongs. + constant : bool, optional + Whether the variable's value is constant during a run. + Defaults to ``False``. + scalar : bool, optional + Whether this array is a 1-element array that should be treated like a + scalar (e.g. for a single delay value across synapses). Defaults to + ``False``. + is_bool: bool, optional + Whether this is a boolean variable (also implies it is dimensionless). + Defaults to ``False`` + ''' + def __init__(self, name, unit, value, group_name=None, constant=False, + scalar=False, is_bool=False): + + self.name = name + + Variable.__init__(self, unit, value, scalar=scalar, + constant=constant, is_bool=is_bool) + #: The reference to the array storing the data for the variable. + self.value = value + + group_name = '_'+group_name+'_' if group_name is not None else '_' + #: The name for the array used in generated code + self.arrayname = '_array' + group_name + name + + def get_value(self): + return self.value + + def set_value(self, value): + self.value[:] = value + + def get_addressable_value(self, group, level=0): + return VariableView(self.name, self, group, None, level) + + def get_addressable_value_with_unit(self, group, level=0): + return VariableView(self.name, self, group, self.unit, level) + + +class DynamicArrayVariable(ArrayVariable): + ''' + An object providing information about a model variable stored in a dynamic + array (used in `Synapses`). + ''' + + def get_value(self): + # The actual numpy array is accesible via DynamicArray1D.data + return self.value.data + + +class Subexpression(Variable): + ''' + An object providing information about a static equation in a model + definition, used as a hint in optimising. Can test if a variable is used + via ``var in spec``. The specifier is also able to return the result of + the expression. + + Parameters + ---------- + unit : `Unit` + The unit of the static equation + dtype : `numpy.dtype` + The dtype used for the expression. 
+ expr : str + The expression defining the static equation. + variables : dict + The variables dictionary, containing variables for the + model variables used in the expression + namespace : dict + The namespace dictionary, containing identifiers for all the external + variables/functions used in the expression + is_bool: bool, optional + Whether this is a boolean variable (also implies it is dimensionless). + Defaults to ``False`` + ''' + def __init__(self, unit, dtype, expr, variables, namespace, + is_bool=False): + Variable.__init__(self, unit, value=None, dtype=dtype, + constant=False, scalar=False, is_bool=is_bool) + + #: The expression defining the static equation. + self.expr = expr.strip() + #: The identifiers used in the expression + self.identifiers = get_identifiers(expr) + #: Specifiers for the identifiers used in the expression + self.variables = variables + + #: The NeuronGroup's namespace for the identifiers used in the + #: expression + self.namespace = namespace + + #: An additional namespace provided by the run function (and updated + #: in `NeuronGroup.pre_run`) that is used if the NeuronGroup does not + #: have an explicitly defined namespace. + self.additional_namespace = None + + def get_value(self): + raise AssertionError('get_value should never be called for a Subexpression') + + def __contains__(self, var): + return var in self.identifiers + + def __repr__(self): + description = ('<{classname}(unit={unit}, dtype={dtype}, ' + 'expr={expr}, variables=<...>, namespace=<....>)>') + return description.format(classname=self.__class__.__name__, + unit=repr(self.unit), + dtype=repr(self.dtype), + expr=repr(self.expr)) + diff --git a/brian2/equations/equations.py b/brian2/equations/equations.py index 08db46bd5..e5286ee40 100644 --- a/brian2/equations/equations.py +++ b/brian2/equations/equations.py @@ -699,7 +699,7 @@ def _sort_static_equations(self): elif eq.type == PARAMETER: eq.update_order = len(sorted_eqs) + 1 - def check_units(self, namespace, specifiers, additional_namespace=None): + def check_units(self, namespace, variables, additional_namespace=None): ''' Check all the units for consistency. @@ -708,8 +708,8 @@ def check_units(self, namespace, specifiers, additional_namespace=None): namespace : `CompoundNamespace` The namespace for resolving external identifiers, should be provided by the `NeuronGroup` or `Synapses`. - specifiers : dict of `Specifier` objects - The specifiers of the state variables and internal variables + variables : dict of `Variable` objects + The variables of the state variables and internal variables (e.g. 
t and dt) additional_namespace = (str, dict-like) A namespace tuple (name and dictionary), describing the additional @@ -724,7 +724,7 @@ def check_units(self, namespace, specifiers, additional_namespace=None): ''' external = frozenset().union(*[expr.identifiers for _, expr in self.eq_expressions]) - external -= set(specifiers.keys()) + external -= set(variables.keys()) resolved_namespace = namespace.resolve_all(external, additional_namespace, @@ -737,10 +737,10 @@ def check_units(self, namespace, specifiers, additional_namespace=None): if eq.type == DIFFERENTIAL_EQUATION: check_unit(str(eq.expr), self.units[var] / second, - resolved_namespace, specifiers) + resolved_namespace, variables) elif eq.type == STATIC_EQUATION: check_unit(str(eq.expr), self.units[var], - resolved_namespace, specifiers) + resolved_namespace, variables) else: raise AssertionError('Unknown equation type: "%s"' % eq.type) diff --git a/brian2/equations/unitcheck.py b/brian2/equations/unitcheck.py index 693bd5a33..9c87be5e1 100644 --- a/brian2/equations/unitcheck.py +++ b/brian2/equations/unitcheck.py @@ -16,7 +16,7 @@ from brian2.codegen.translation import analyse_identifiers from brian2.parsing.expressions import parse_expression_unit from brian2.parsing.statements import parse_statement -from brian2.core.specifiers import VariableSpecifier +from brian2.core.variables import Variable __all__ = ['unit_from_string', 'unit_from_expression', 'check_unit', 'check_units_statements'] @@ -88,7 +88,7 @@ def unit_from_string(unit_string): return evaluated_unit -def check_unit(expression, unit, namespace, specifiers): +def check_unit(expression, unit, namespace, variables): ''' Evaluates the unit for an expression in a given namespace. @@ -98,7 +98,7 @@ def check_unit(expression, unit, namespace, specifiers): The expression to evaluate. namespace : dict-like The namespace of external variables. - specifiers : dict of `Specifier` objects + variables : dict of `Variable` objects The information about the internal variables Raises @@ -112,13 +112,13 @@ def check_unit(expression, unit, namespace, specifiers): -------- unit_from_expression ''' - expr_unit = parse_expression_unit(expression, namespace, specifiers) + expr_unit = parse_expression_unit(expression, namespace, variables) fail_for_dimension_mismatch(expr_unit, unit, ('Expression %s does not ' 'have the expected units' % expression)) -def check_units_statements(code, namespace, specifiers): +def check_units_statements(code, namespace, variables): ''' Check the units for a series of statements. Setting a model variable has to use the correct unit. For newly introduced temporary variables, the unit @@ -131,7 +131,7 @@ def check_units_statements(code, namespace, specifiers): The expression to evaluate. namespace : dict-like The namespace of external variables. - specifiers : dict of `Specifier` objects + variables : dict of `Variable` objects The information about the internal variables Raises @@ -141,7 +141,7 @@ def check_units_statements(code, namespace, specifiers): DimensionMismatchError If an unit mismatch occurs during the evaluation. ''' - known = set(specifiers.keys()) | set(namespace.keys()) + known = set(variables.keys()) | set(namespace.keys()) newly_defined, _, unknown = analyse_identifiers(code, known) if len(unknown): @@ -149,9 +149,9 @@ def check_units_statements(code, namespace, specifiers): 'not happen at this stage. 
Unkown identifiers: %s' % unknown)) - # We want to add newly defined variables to the specifiers dictionary so we + # We want to add newly defined variables to the variables dictionary so we # make a copy now - specs = dict(specifiers) + variables = dict(variables) code = re.split(r'[;\n]', code) for line in code: @@ -159,10 +159,10 @@ def check_units_statements(code, namespace, specifiers): if not len(line): continue # skip empty lines - var, op, expr = parse_statement(line) + varname, op, expr = parse_statement(line) if op in ('+=', '-=', '*=', '/=', '%='): # Replace statements such as "w *=2" by "w = w * 2" - expr = '{var} {op_first} {expr}'.format(var=var, + expr = '{var} {op_first} {expr}'.format(var=varname, op_first=op[0], expr=expr) op = '=' @@ -170,18 +170,19 @@ def check_units_statements(code, namespace, specifiers): pass else: raise AssertionError('Unknown operator "%s"' % op) - - expr_unit = parse_expression_unit(expr, namespace, specs) - if var in specifiers: - fail_for_dimension_mismatch(specifiers[var].unit, + expr_unit = parse_expression_unit(expr, namespace, variables) + + if varname in variables: + fail_for_dimension_mismatch(variables[varname].unit, expr_unit, ('Code statement "%s" does not use ' 'correct units' % line)) - elif var in newly_defined: + elif varname in newly_defined: # note the unit for later - specs[var] = VariableSpecifier(var, expr_unit) + variables[varname] = Variable(expr_unit, is_bool=False, + scalar=False) else: - raise AssertionError(('Variable "%s" is neither in the specifiers ' + raise AssertionError(('Variable "%s" is neither in the variables ' 'dictionary nor in the list of undefined ' - 'variables.' % var)) \ No newline at end of file + 'variables.' % varname)) \ No newline at end of file diff --git a/brian2/groups/group.py b/brian2/groups/group.py index df20505d9..0cb9a443f 100644 --- a/brian2/groups/group.py +++ b/brian2/groups/group.py @@ -3,12 +3,13 @@ saves state variables, e.g. `NeuronGroup` or `StateMonitor`. 
''' import weakref +from collections import defaultdict import numpy as np from brian2.core.base import BrianObject -from brian2.core.specifiers import (ArrayVariable, Index, AttributeValue, - ReadOnlyValue) +from brian2.core.variables import (ArrayVariable, StochasticVariable, + AttributeVariable, Variable) from brian2.core.namespace import get_local_namespace from brian2.units.fundamentalunits import fail_for_dimension_mismatch, Unit from brian2.units.allunits import second @@ -17,21 +18,22 @@ from brian2.equations.unitcheck import check_units_statements from brian2.utils.logger import get_logger -__all__ = ['Group', 'GroupCodeRunner', 'Indices'] +__all__ = ['Group', 'GroupCodeRunner'] logger = get_logger(__name__) -class Indices(object): +class GroupItemMapping(Variable): - def __init__(self, N): + def __init__(self, N, offset, group): self.N = N - self._indices = np.arange(self.N) - self.specifiers = {'i': ArrayVariable('i', + self.offset = int(offset) + self.group = weakref.proxy(group) + self._indices = np.arange(self.N + self.offset) + self.variables = {'i': ArrayVariable('i', Unit(1), - self._indices.dtype, - self._indices, - '_neuron_idx')} + self._indices - self.offset)} + Variable.__init__(self, Unit(1), value=self, constant=True) def __len__(self): return self.N @@ -47,12 +49,29 @@ def __getitem__(self, index): 'got %d dimensions.') % len(index)) if isinstance(index, basestring): # interpret the string expression - namespace = {'i': self._indices} - - result = eval(index, namespace) - return np.flatnonzero(result) + namespace = get_local_namespace(1) + additional_namespace = ('implicit-namespace', namespace) + abstract_code = '_cond = ' + index + check_code_units(abstract_code, self.group, + additional_variables=self.variables, + additional_namespace=additional_namespace) + codeobj = create_runner_codeobj(self.group, + abstract_code, + 'state_variable_indexing', + additional_variables=self.variables, + additional_namespace=additional_namespace, + ) + return codeobj() else: - return self._indices[index] + if isinstance(index, slice): + start, stop, step = index.indices(self.N) + index = slice(start + self.offset, stop + self.offset, step) + return self._indices[index] + else: + index_array = np.asarray(index) + if not np.issubdtype(index_array.dtype, np.int): + raise TypeError('Indexing is only supported for integer arrays') + return self._indices[index_array + self.offset] class Group(object): @@ -63,31 +82,31 @@ class Group(object): # (should make autocompletion work) ''' def __init__(self): - if not hasattr(self, 'specifiers'): - raise ValueError('Classes derived from Group need specifiers attribute.') - if not hasattr(self, 'indices'): + if not hasattr(self, 'offset'): + self.offset = 0 + if not hasattr(self, 'variables'): + raise ValueError('Classes derived from Group need variables attribute.') + if not hasattr(self, 'item_mapping'): try: N = len(self) except TypeError: - raise ValueError(('Classes derived from Group need an indices ' + raise ValueError(('Classes derived from Group need an item_mapping ' 'attribute, or a length to automatically ' 'provide 1-d indexing')) - self.indices = Indices(N) - + self.item_mapping = GroupItemMapping(N, self.offset, self) + if not hasattr(self, 'indices'): + self.indices = {'_idx': self.item_mapping} + if not hasattr(self, 'variable_indices'): + self.variable_indices = defaultdict(lambda: '_idx') if not hasattr(self, 'codeobj_class'): self.codeobj_class = None - - # Add a reference to the synapses to the template - 
self.specifiers['_indices'] = ReadOnlyValue('_indices', Unit(1), - np.int, self.indices) - self._group_attribute_access_active = True - def _create_specifiers(self): - return {'t': AttributeValue('t', second, np.float64, - self.clock, 't_'), - 'dt': AttributeValue('dt', second, np.float64, - self.clock, 'dt_', constant=True) + def _create_variables(self): + return {'t': AttributeVariable(second, self.clock, 't_', + constant=False), + 'dt': AttributeVariable(second, self.clock, 'dt_', + constant=True) } def state_(self, name): @@ -95,7 +114,7 @@ def state_(self, name): Gets the unitless array. ''' try: - return self.specifiers[name].get_addressable_value() + return self.variables[name].get_addressable_value(self) except KeyError: raise KeyError("Array named "+name+" not found.") @@ -104,8 +123,8 @@ def state(self, name): Gets the array with units. ''' try: - spec = self.specifiers[name] - return spec.get_addressable_value_with_unit() + var = self.variables[name] + return var.get_addressable_value_with_unit(self) except KeyError: raise KeyError("Array named "+name+" not found.") @@ -140,22 +159,22 @@ def __setattr__(self, name, val): # Group.__init__ if not hasattr(self, '_group_attribute_access_active'): object.__setattr__(self, name, val) - elif name in self.specifiers: - spec = self.specifiers[name] + elif name in self.variables: + var = self.variables[name] if not isinstance(val, basestring): - fail_for_dimension_mismatch(val, spec.unit, + fail_for_dimension_mismatch(val, var.unit, 'Incorrect units for setting %s' % name) # Make the call X.var = ... equivalent to X.var[:] = ... - spec.get_addressable_value_with_unit(level=1)[:] = val - elif len(name) and name[-1]=='_' and name[:-1] in self.specifiers: + var.get_addressable_value_with_unit(self, level=1)[:] = val + elif len(name) and name[-1]=='_' and name[:-1] in self.variables: # no unit checking - spec = self.specifiers[name[:-1]] + var = self.variables[name[:-1]] # Make the call X.var = ... equivalent to X.var[:] = ... - spec.get_addressable_value(level=1)[:] = val + var.get_addressable_value(self, level=1)[:] = val else: object.__setattr__(self, name, val) - def _set_with_code(self, specifier, group_indices, code, + def _set_with_code(self, variable, group_indices, code, check_units=True, level=0): ''' Sets a variable using a string expression. Is called by @@ -164,8 +183,8 @@ def _set_with_code(self, specifier, group_indices, code, Parameters ---------- - specifier : `ArrayVariable` - The `Specifier` for the variable to be set + variable : `ArrayVariable` + The `Variable` for the variable to be set group_indices : ndarray of int The indices of the elements that are to be set. code : str @@ -178,34 +197,88 @@ def _set_with_code(self, specifier, group_indices, code, Necessary so that both `X.var = ` and `X.var[:] = ` have access to the surrounding namespace. 
''' - abstract_code = specifier.name + ' = ' + code - indices = {'_neuron_idx': Index('_neuron_idx', iterate_all=False)} + abstract_code = variable.name + ' = ' + code namespace = get_local_namespace(level + 1) additional_namespace = ('implicit-namespace', namespace) - additional_specifiers = dict(self.indices.specifiers) # TODO: Find a name that makes sense for reset and variable setting # with code - additional_specifiers['_spikes'] = ArrayVariable('_spikes', + additional_variables = self.item_mapping.variables + additional_variables['_spikes'] = ArrayVariable('_spikes', Unit(1), - np.int32, - group_indices.astype(np.int32), - '', # no index, - self) + value=group_indices.astype(np.int32), + group_name=self.name) + # TODO: Have an additional argument to avoid going through the index + # array for situations where iterate_all could be used codeobj = create_runner_codeobj(self, abstract_code, 'reset', - indices, - additional_specifiers=additional_specifiers, + additional_variables=additional_variables, additional_namespace=additional_namespace, - check_units=check_units, - codeobj_class=self.codeobj_class) + check_units=check_units) codeobj() -def create_runner_codeobj(group, code, template_name, indices, - name=None, check_units=True, additional_specifiers=None, - additional_namespace=None, template_kwds=None, - codeobj_class=None): +def check_code_units(code, group, additional_variables=None, + additional_namespace=None, + ignore_keyerrors=False): + ''' + Check statements for correct units. + + Parameters + ---------- + code : str + The series of statements to check + group : `Group` + The context for the code execution + additional_variables : dict-like, optional + A mapping of names to `Variable` objects, used in addition to the + variables saved in `self.group`. + additional_namespace : dict-like, optional + An additional namespace, as provided to `Group.pre_run` + ignore_keyerrors : boolean, optional + Whether to silently ignore unresolvable identifiers. Should be set + to ``False`` (the default) if the namespace is expected to be + complete (e.g. in `Group.pre_run`) but to ``True`` when the check + is done during object initialisation where the namespace is not + necessarily complete yet + + Raises + ------ + DimensionMismatchError + If `code` has unit mismatches + ''' + all_variables = dict(group.variables) + if additional_variables is not None: + all_variables.update(additional_variables) + + # Resolve the namespace, resulting in a dictionary containing only the + # external variables that are needed by the code -- keep the units for + # the unit checks + # Note that here we do not need to recursively descend into + # subexpressions. For unit checking, we only need to know the units of + # the subexpressions not what variables they refer to + _, _, unknown = analyse_identifiers(code, all_variables) + try: + resolved_namespace = group.namespace.resolve_all(unknown, + additional_namespace, + strip_units=False) + except KeyError as ex: + if ignore_keyerrors: + logger.debug('Namespace not complete (yet), ignoring: %s ' % str(ex), + 'check_code_units') + return + else: + raise ex + + check_units_statements(code, resolved_namespace, all_variables) + + +def create_runner_codeobj(group, code, template_name, indices=None, + variable_indices=None, + name=None, check_units=True, + additional_variables=None, + additional_namespace=None, + template_kwds=None): ''' Create a `CodeObject` for the execution of code in the context of a `Group`. 
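From the user's perspective, the VariableView/_set_with_code machinery above is what backs assigning a string expression to a state variable. The following minimal sketch is illustrative only (the group and its equation are hypothetical, assuming the public NeuronGroup front-end wired to this code path):

    from brian2 import NeuronGroup, mV, ms

    G = NeuronGroup(100, 'dv/dt = -v / (10*ms) : volt')

    # A string right-hand side goes through VariableView.__setitem__ ->
    # Group._set_with_code, which builds the abstract code 'v = ...' and runs
    # it as a code object (currently reusing the 'reset' template).
    G.v = '-70*mV + 10*mV * rand()'

    # In-place operators with string arguments now raise TypeError; spell the
    # expression out instead:
    G.v = 'v + 5*mV'     # rather than G.v += '5*mV'

Unit checking of such an assignment follows the view that is used: the unit-aware form (G.v) checks the expression against the variable's unit, while the unitless form (G.v_) skips the check, since VariableView only requests unit checking when it carries a unit.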
@@ -217,78 +290,62 @@ def create_runner_codeobj(group, code, template_name, indices, The code to be executed. template : `LanguageTemplater` The template to use for the code. - indices : dict-like + indices : dict-like, optional A mapping from index name to `Index` objects, describing the indices - used for the variables in the code. + used for the variables in the code. If none are given, uses the + corresponding attribute of `group`. + variable_indices : dict-like, optional + A mapping from `Variable` objects to index names (strings). If none is + given, uses the corresponding attribute of `group`. name : str, optional A name for this code object, will use ``group + '_codeobject*'`` if none is given. check_units : bool, optional Whether to check units in the statement. Defaults to ``True``. - additional_specifiers : dict-like, optional - A mapping of names to `Specifier` objects, used in addition to the - specifiers saved in `group`. + additional_variables : dict-like, optional + A mapping of names to `Variable` objects, used in addition to the + variables saved in `group`. additional_namespace : dict-like, optional A mapping from names to objects, used in addition to the namespace saved in `group`. template_kwds : dict, optional A dictionary of additional information that is passed to the template. - codeobj_class : `CodeObject`, optional - The `CodeObject` class to create. ''' logger.debug('Creating code object for abstract code:\n' + str(code)) - - if group is not None: - all_specifiers = dict(group.specifiers) - else: - all_specifiers = {} - # If the GroupCodeRunner has specifiers, add them - if additional_specifiers is not None: - all_specifiers.update(additional_specifiers) template = get_codeobject_template(template_name, - codeobj_class=codeobj_class) - - if check_units: - # Resolve the namespace, resulting in a dictionary containing only the - # external variables that are needed by the code -- keep the units for - # the unit checks - # Note that here, in contrast to the namespace resolution below, we do - # not need to recursively descend into subexpressions. 
For unit - # checking, we only need to know the units of the subexpressions, - # not what variables they refer to - _, _, unknown = analyse_identifiers(code, all_specifiers) - resolved_namespace = group.namespace.resolve_all(unknown, - additional_namespace, - strip_units=False) + codeobj_class=group.codeobj_class) - check_units_statements(code, resolved_namespace, all_specifiers) + all_variables = dict(group.variables) + if additional_variables is not None: + all_variables.update(additional_variables) # Determine the identifiers that were used - _, used_known, unknown = analyse_identifiers(code, all_specifiers, + _, used_known, unknown = analyse_identifiers(code, all_variables, recursive=True) logger.debug('Unknown identifiers in the abstract code: ' + str(unknown)) resolved_namespace = group.namespace.resolve_all(unknown, additional_namespace) - # Only pass the specifiers that are actually used - specifiers = {} + # Only pass the variables that are actually used + variables = {} for var in used_known: - specifiers[var] = all_specifiers[var] + if not isinstance(all_variables[var], StochasticVariable): + variables[var] = all_variables[var] - # Also add the specifiers that the template needs - for spec in template.specifiers: + # Also add the variables that the template needs + for var in template.variables: try: - specifiers[spec] = all_specifiers[spec] + variables[var] = all_variables[var] except KeyError as ex: - # We abuse template.specifiers here to also store names of things + # We abuse template.variables here to also store names of things # from the namespace (e.g. rand) that are needed # TODO: Improve all of this namespace/specifier handling if group is not None: # Try to find the name in the group's namespace - resolved_namespace[spec] = group.namespace.resolve(spec, - additional_namespace) + resolved_namespace[var] = group.namespace.resolve(var, + additional_namespace) else: raise ex @@ -298,14 +355,20 @@ def create_runner_codeobj(group, code, template_name, indices, else: name = '_codeobject*' + if indices is None: + indices = group.indices + if variable_indices is None: + variable_indices = group.variable_indices + return create_codeobject(name, code, resolved_namespace, - specifiers, + variables, template_name, indices=indices, + variable_indices=variable_indices, template_kwds=template_kwds, - codeobj_class=codeobj_class) + codeobj_class=group.codeobj_class) class GroupCodeRunner(BrianObject): @@ -328,10 +391,6 @@ class GroupCodeRunner(BrianObject): The abstract code that should be executed every time step. The `update_abstract_code` method might generate this code dynamically before every run instead. - iterate_all : bool, optional - Whether the index iterates over all possible values (``True``, the - default) or only over a subset (``False``, used for example for the - reset which only affects neurons that have spiked). when : `Scheduler`, optional At which point in the schedule this object should be executed. name : str, optional @@ -356,28 +415,14 @@ class GroupCodeRunner(BrianObject): state of the `Group`. For example, the `Thresholder` sets the `NeuronGroup.spikes` property in `post_update`. 
''' - def __init__(self, group, template, indices, code=None, iterate_all=True, - when=None, name='coderunner*', check_units=True, - template_kwds=None): + def __init__(self, group, template, code=None, when=None, + name='coderunner*', check_units=True, template_kwds=None): BrianObject.__init__(self, when=when, name=name) - self.indices = indices self.group = weakref.proxy(group) self.template = template self.abstract_code = code - self.iterate_all = iterate_all self.check_units = check_units self.template_kwds = template_kwds - # Try to generate the abstract code and the codeobject without any - # additional namespace. This might work in situations where the - # namespace is completely defined in the NeuronGroup. In this case, - # we might spot parsing or unit errors already now and don't have to - # wait until the run call. We want to ignore KeyErrors, though, because - # they possibly result from an incomplete namespace, which is still ok - # at this time. - try: - self.pre_run(None) - except KeyError: - pass def update_abstract_code(self): ''' @@ -389,28 +434,23 @@ def update_abstract_code(self): ''' pass - def _create_codeobj(self, additional_namespace=None): - ''' A little helper function to reduce the amount of repetition when - calling the language's _create_codeobj (always pass self.specifiers and - self.namespace + additional namespace). - ''' - - # If the GroupCodeRunner has specifiers, add them - if hasattr(self, 'specifiers'): - additional_specifiers = self.specifiers - else: - additional_specifiers = None - - return create_runner_codeobj(self.group, self.abstract_code, self.template, - self.indices, self.name, self.check_units, - additional_specifiers=additional_specifiers, - additional_namespace=additional_namespace, - template_kwds=self.template_kwds, - codeobj_class=self.group.codeobj_class) - def pre_run(self, namespace): self.update_abstract_code() - self.codeobj = self._create_codeobj(additional_namespace=namespace) + # If the GroupCodeRunner has variables, add them + if hasattr(self, 'variables'): + additional_variables = self.variables + else: + additional_variables = None + if self.check_units: + check_code_units(self.abstract_code, self.group, + additional_variables, namespace) + self.codeobj = create_runner_codeobj(self.group, self.abstract_code, + self.template, + name=self.name, + check_units=self.check_units, + additional_variables=additional_variables, + additional_namespace=namespace, + template_kwds=self.template_kwds) def pre_update(self): ''' diff --git a/brian2/groups/neurongroup.py b/brian2/groups/neurongroup.py index eb9286451..f653880de 100644 --- a/brian2/groups/neurongroup.py +++ b/brian2/groups/neurongroup.py @@ -13,16 +13,18 @@ from brian2.core.preferences import brian_prefs from brian2.core.base import BrianObject from brian2.core.namespace import create_namespace -from brian2.core.specifiers import (ReadOnlyValue, AttributeValue, ArrayVariable, - StochasticVariable, Subexpression, Index) +from brian2.core.variables import (Variable, AttributeVariable, ArrayVariable, + StochasticVariable, Subexpression) from brian2.core.spikesource import SpikeSource from brian2.core.scheduler import Scheduler -from brian2.parsing.expressions import parse_expression_unit, is_boolean_expression +from brian2.parsing.expressions import (parse_expression_unit, + is_boolean_expression) from brian2.utils.logger import get_logger from brian2.units.allunits import second from brian2.units.fundamentalunits import Quantity, Unit, have_same_dimensions -from .group import 
Group, GroupCodeRunner +from .group import Group, GroupCodeRunner, check_code_units +from .subgroup import Subgroup __all__ = ['NeuronGroup'] @@ -36,24 +38,27 @@ class StateUpdater(GroupCodeRunner): ''' def __init__(self, group, method): self.method_choice = method - indices = {'_neuron_idx': Index('_neuron_idx', True)} GroupCodeRunner.__init__(self, group, 'stateupdate', - indices=indices, when=(group.clock, 'groups'), name=group.name + '_stateupdater*', check_units=False) self.method = StateUpdateMethod.determine_stateupdater(self.group.equations, - self.group.specifiers, + self.group.variables, method) - + + # Generate the full abstract code to catch errors in the refractoriness + # formulation. However, do not fail on KeyErrors since the + # refractoriness might refer to variables that don't exist yet + try: + self.update_abstract_code() + except KeyError as ex: + logger.debug('Namespace not complete (yet), ignoring: %s ' % str(ex), + 'StateUpdater') + def update_abstract_code(self): - - self.method = StateUpdateMethod.determine_stateupdater(self.group.equations, - self.group.specifiers, - self.method_choice) # Update the not_refractory variable for the refractory period mechanism ref = self.group._refractory @@ -64,12 +69,12 @@ def update_abstract_code(self): self.abstract_code = 'not_refractory = 1*((t - lastspike) > %f)\n' % ref else: namespace = self.group.namespace - unit = parse_expression_unit(str(ref), namespace, self.group.specifiers) + unit = parse_expression_unit(str(ref), namespace, self.group.variables) if have_same_dimensions(unit, second): self.abstract_code = 'not_refractory = 1*((t - lastspike) > %s)\n' % ref elif have_same_dimensions(unit, Unit(1)): if not is_boolean_expression(str(ref), namespace, - self.group.specifiers): + self.group.variables): raise TypeError(('Refractory expression is dimensionless ' 'but not a boolean value. It needs to ' 'either evaluate to a timespan or to a ' @@ -85,7 +90,7 @@ def update_abstract_code(self): '"%s" has units %s instead') % (ref, unit)) self.abstract_code += self.method(self.group.equations, - self.group.specifiers) + self.group.variables) class Thresholder(GroupCodeRunner): @@ -95,19 +100,23 @@ class Thresholder(GroupCodeRunner): and ``refractory_until`` attributes. ''' def __init__(self, group): - indices = {'_neuron_idx': Index('_neuron_idx', True)} # For C++ code, we need these names explicitly, since not_refractory # and lastspike might also be used in the threshold condition -- the # names will then refer to single (constant) values and cannot be used # for assigning new values - template_kwds = {'_array_not_refractory': group.specifiers['not_refractory'].arrayname, - '_array_lastspike': group.specifiers['lastspike'].arrayname} + template_kwds = {'_array_not_refractory': group.variables['not_refractory'].arrayname, + '_array_lastspike': group.variables['lastspike'].arrayname} GroupCodeRunner.__init__(self, group, 'threshold', - indices=indices, when=(group.clock, 'thresholds'), name=group.name+'_thresholder*', template_kwds=template_kwds) + + # Check the abstract code for unit mismatches (only works if the + # namespace is already complete) + self.update_abstract_code() + check_code_units(self.abstract_code, self.group, ignore_keyerrors=True) + def update_abstract_code(self): self.abstract_code = '_cond = ' + self.group.threshold @@ -123,13 +132,16 @@ class Resetter(GroupCodeRunner): variables of neurons that have spiked in this timestep. 
''' def __init__(self, group): - indices = {'_neuron_idx': Index('_neuron_idx', False)} GroupCodeRunner.__init__(self, group, 'reset', - indices=indices, when=(group.clock, 'resets'), name=group.name + '_resetter*') - + + # Check the abstract code for unit mismatches (only works if the + # namespace is already complete) + self.update_abstract_code() + check_code_units(self.abstract_code, self.group, ignore_keyerrors=True) + def update_abstract_code(self): self.abstract_code = self.group.reset @@ -236,8 +248,8 @@ def __init__(self, N, model, method=None, # Setup the namespace self.namespace = create_namespace(namespace) - # Setup specifiers - self.specifiers = self._create_specifiers() + # Setup variables + self.variables = self._create_variables() # All of the following will be created in pre_run @@ -298,6 +310,22 @@ def __len__(self): ''' return self.N + + def __getitem__(self, item): + if not isinstance(item, slice): + raise TypeError('Subgroups can only be constructed using slicing syntax') + start, stop, step = item.indices(self.N) + if step != 1: + raise IndexError('Subgroups have to be contiguous') + if stop > self.N: + raise IndexError(('Cannot extend subgroup to index %d, ' + 'group has only %d items') % (stop, self.N)) + if start >= stop: + raise IndexError('Illegal start/end values for subgroup, %d>=%d' % + (start, stop)) + + return Subgroup(self, start, stop) + def _allocate_memory(self, dtype=None): # Allocate memory (TODO: this should be refactored somewhere at some point) @@ -349,65 +377,59 @@ def runner(self, code, when=None, name=None): code=code, name=name, when=when) return runner - def _create_specifiers(self): + def _create_variables(self): ''' - Create the specifiers dictionary for this `NeuronGroup`, containing + Create the variables dictionary for this `NeuronGroup`, containing entries for the equation variables and some standard entries. 
''' - # Get the standard specifiers for all groups - s = Group._create_specifiers(self) + # Get the standard variables for all groups + s = Group._create_variables(self) - # Standard specifiers always present - s.update({'_num_neurons': ReadOnlyValue('_num_neurons', Unit(1), - np.int, self.N), - '_spikes': AttributeValue('_spikes', Unit(1), np.int32, - self, 'spikes')}) + # Standard variables always present + s.update({'_spikes': AttributeVariable(Unit(1), self, + 'spikes', constant=False)}) - # First add all the differential equations and parameters, because they - # may be referred to by static equations for eq in self.equations.itervalues(): if eq.type in (DIFFERENTIAL_EQUATION, PARAMETER): array = self.arrays[eq.varname] constant = ('constant' in eq.flags) s.update({eq.varname: ArrayVariable(eq.varname, eq.unit, - array.dtype, array, - '_neuron_idx', - self, + group_name=self.name, constant=constant, is_bool=eq.is_bool)}) elif eq.type == STATIC_EQUATION: - s.update({eq.varname: Subexpression(eq.varname, eq.unit, + s.update({eq.varname: Subexpression(eq.unit, brian_prefs['core.default_scalar_dtype'], str(eq.expr), - s, - self.namespace, + variables=s, + namespace=self.namespace, is_bool=eq.is_bool)}) else: raise AssertionError('Unknown type of equation: ' + eq.eq_type) # Stochastic variables for xi in self.equations.stochastic_variables: - s.update({xi: StochasticVariable(xi)}) + s.update({xi: StochasticVariable()}) return s def pre_run(self, namespace): - # Update the namespace information in the specifiers in case the + # Update the namespace information in the variables in case the # namespace was not specified explicitly defined at creation time # Note that values in the explicit namespace might still change # between runs, but the Subexpression stores a reference to # self.namespace so these changes are taken into account automatically if not self.namespace.is_explicit: - for spec in self.specifiers.itervalues(): - if isinstance(spec, Subexpression): - spec.additional_namespace = namespace + for var in self.variables.itervalues(): + if isinstance(var, Subexpression): + var.additional_namespace = namespace # Check units - self.equations.check_units(self.namespace, self.specifiers, + self.equations.check_units(self.namespace, self.variables, namespace) def _repr_html_(self): diff --git a/brian2/groups/poissongroup.py b/brian2/groups/poissongroup.py index b51d6b888..978c2ddb7 100644 --- a/brian2/groups/poissongroup.py +++ b/brian2/groups/poissongroup.py @@ -4,12 +4,15 @@ from brian2.core.base import BrianObject from brian2.core.spikesource import SpikeSource from brian2.core.scheduler import Scheduler +from brian2.core.variables import ArrayVariable from brian2.units.fundamentalunits import check_units from brian2.units.stdunits import Hz +from .group import Group + __all__ = ['PoissonGroup'] -class PoissonGroup(BrianObject, SpikeSource): +class PoissonGroup(Group, BrianObject, SpikeSource): ''' Poisson spike source @@ -40,16 +43,21 @@ def __init__(self, N, rates, when=None, name='poissongroup*'): #: The array of spikes from the most recent time step self.spikes = np.array([], dtype=int) - self.rates = rates + self._rates = np.asarray(rates) self.N = N = int(N) self.pthresh = self._calc_threshold() + + self.variables = {'rates': ArrayVariable('rates', Hz, self._rates, + group_name=self.name, + constant=True)} + Group.__init__(self) def __len__(self): return self.N def _calc_threshold(self): - return np.array(self.rates*self.clock.dt) + return np.array(self._rates*self.clock.dt_) def 
pre_run(self, namespace): self.pthresh = self._calc_threshold() @@ -61,15 +69,5 @@ def __repr__(self): description = '{classname}({N}, rates={rates})' return description.format(classname=self.__class__.__name__, N=self.N, - rates=repr(self.rates)) - + rates=repr(self._rates)) -if __name__=='__main__': - from pylab import * - from brian2 import * - P = PoissonGroup(1000, rates=100*Hz) - M = SpikeMonitor(P) - run(100*ms) - plot(M.t, M.i, '.k') - print 'Estimated rate:', M.num_spikes/(defaultclock.t*len(P)) - show() diff --git a/brian2/groups/subgroup.py b/brian2/groups/subgroup.py index 82ae70c9b..906dcf978 100644 --- a/brian2/groups/subgroup.py +++ b/brian2/groups/subgroup.py @@ -1,17 +1,18 @@ import weakref -from numpy import array, logical_and +import numpy as np from brian2.core.base import BrianObject from brian2.core.spikesource import SpikeSource from brian2.core.scheduler import Scheduler +from brian2.groups.group import Group __all__ = ['Subgroup'] -class Subgroup(BrianObject, SpikeSource): +class Subgroup(Group, BrianObject, SpikeSource): ''' - Subgroup of any `SpikeSource` + Subgroup of any `Group` Parameters ---------- @@ -36,6 +37,8 @@ class Subgroup(BrianObject, SpikeSource): ''' def __init__(self, source, start, end, name=None): self.source = weakref.proxy(source) + if name is None: + name = source.name + '_subgroup*' # We want to update the spikes attribute after it has been updated # by the parent, we do this in slot 'thresholds' with an order # one higher than the parent order to ensure it takes place after the @@ -43,20 +46,24 @@ def __init__(self, source, start, end, name=None): schedule = Scheduler(clock=source.clock, when='thresholds', order=source.order+1) BrianObject.__init__(self, when=schedule, name=name) - self.spikes = array([], dtype=int) self.N = end-start self.start = start self.end = end - + self.offset = start + + self.variables = self.source.variables + self.variable_indices = self.source.variable_indices + self.namespace = self.source.namespace + self.codeobj_class = self.source.codeobj_class + + Group.__init__(self) + + # Make the spikes from the source group accessible + spikes = property(lambda self: self.source.spikes) + def __len__(self): return self.N - def update(self): - spikes = self.source.spikes - # TODO: improve efficiency with bisect? - spikes = spikes[logical_and(spikes>=self.start, spikes 1: delays = np.round(self._delays[indices] / self.dt).astype(int) @@ -211,22 +218,22 @@ def pre_update(self): class IndexView(object): def __init__(self, indices, mapping): - self.indices = indices + self.index = indices self.mapping = mapping def __getitem__(self, item): - synaptic_indices = self.indices[item] - return self.mapping[synaptic_indices] + synaptic_indices = self.index[item] + return synaptic_indices class SynapseIndexView(object): def __init__(self, indices): - self.indices = indices + self.index = indices def __getitem__(self, item): - pre = self.indices.i[item] - post = self.indices.j[item] + pre = self.index.i[item] + post = self.index.j[item] return _synapse_numbers(pre, post) @@ -299,7 +306,7 @@ def _synapse_numbers(pre_neurons, post_neurons): return synapse_numbers -class SynapticIndices(object): +class SynapticItemMapping(Variable): ''' Convenience object to store the synaptic indices. 
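The slicing support added in neurongroup.py together with the reworked Subgroup above can be exercised as in the short sketch below (group names and equations are illustrative only, assuming the standard Brian2 front-end):

    from brian2 import NeuronGroup, mV, ms

    G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : volt')

    P = G[2:8]        # contiguous slice -> Subgroup(G, 2, 8)
    # G[2:8:2] would raise IndexError('Subgroups have to be contiguous')

    # The subgroup shares the parent's variables, namespace and codeobj_class;
    # its GroupItemMapping applies the offset (here 2) when translating indices.
    P.v = -60*mV
    print(G.v[2:8])   # reflects the assignment made through the subgroup

Note also that Subgroup now exposes spikes as a simple property proxying the source group, replacing the per-timestep update() that previously filtered the spike indices.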
@@ -309,37 +316,32 @@ class SynapticIndices(object): Reference to the main `Synapses object` ''' def __init__(self, synapses): - self.source_len = len(synapses.source) - self.target_len = len(synapses.target) + Variable.__init__(self, Unit(1), value=self, constant=True) + self.source = synapses.source + self.target = synapses.target + source_len = len(synapses.source) + target_len = len(synapses.target) self.synapses = weakref.proxy(synapses) dtype = smallest_inttype(MAX_SYNAPSES) - self.synaptic_pre = DynamicArray1D(0, use_numpy_resize=True, - dtype=dtype, refcheck=False) - self.synaptic_post = DynamicArray1D(0, use_numpy_resize=True, - dtype=dtype, refcheck=False) - self.pre_synaptic = [DynamicArray1D(0, use_numpy_resize=True, - dtype=dtype, refcheck=False) - for _ in xrange(self.source_len)] - self.post_synaptic = [DynamicArray1D(0, use_numpy_resize=True, - dtype=dtype, refcheck=False) - for _ in xrange(self.target_len)] - self.i = IndexView(self, self.synaptic_pre) - self.j = IndexView(self, self.synaptic_post) - self.k = SynapseIndexView(self) - - self.specifiers = {'i': DynamicArrayVariable('i', - Unit(1), - self.synaptic_pre.dtype, - self.synaptic_pre, - '_neuron_idx'), - 'j': DynamicArrayVariable('j', - Unit(1), - self.synaptic_post.dtype, - self.synaptic_post, - '_neuron_idx')} + self.synaptic_pre = DynamicArray1D(0, dtype=dtype) + self.synaptic_post = DynamicArray1D(0, dtype=dtype) + self.pre_synaptic = [DynamicArray1D(0, dtype=dtype) + for _ in xrange(source_len)] + self.post_synaptic = [DynamicArray1D(0, dtype=dtype) + for _ in xrange(target_len)] self._registered_variables = [] + self.variables = {'i': DynamicArrayVariable('i', + Unit(1), + self.synaptic_pre), + 'j': DynamicArrayVariable('j', + Unit(1), + self.synaptic_post)} + self.i = IndexView(self.synaptic_pre, self) + self.j = IndexView(self.synaptic_post, self) + self.k = SynapseIndexView(self) + N = property(fget=lambda self: len(self.synaptic_pre), doc='Total number of synapses') @@ -416,38 +418,36 @@ def _add_synapses(self, sources, targets, n, p, condition=None, abstract_code += '_p = ' + str(p) namespace = get_local_namespace(level + 1) additional_namespace = ('implicit-namespace', namespace) - specifiers = { - '_num_source_neurons': ReadOnlyValue('_num_source_neurons', Unit(1), - np.int32, self.source_len), - '_num_target_neurons': ReadOnlyValue('_num_target_neurons', Unit(1), - np.int32, self.target_len), + variables = { + '_source_neurons': ArrayVariable('_source_neurons', Unit(1), + self.source.item_mapping[:] - + self.source.offset, + constant=True), + '_target_neurons': ArrayVariable('_target_neurons', Unit(1), + self.target.item_mapping[:] - + self.target.offset, + constant=True), # The template needs to have access to the DynamicArray here, # having access to the underlying array (which would be much # faster), is not enough - '_synaptic_pre': ReadOnlyValue('_synaptic_pre', Unit(1), - np.int32, - self.synaptic_pre), - '_synaptic_post': ReadOnlyValue('_synaptic_post', Unit(1), - np.int32, - self.synaptic_post), - '_pre_synaptic': ReadOnlyValue('_pre_synaptic', Unit(1), - np.int32, - self.pre_synaptic), - '_post_synaptic': ReadOnlyValue('_post_synaptic', Unit(1), - np.int32, - self.post_synaptic), + '_synaptic_pre': Variable(Unit(1), + self.synaptic_pre, constant=True), + '_synaptic_post': Variable(Unit(1), + self.synaptic_post, constant=True), + '_pre_synaptic': Variable(Unit(1), + self.pre_synaptic, constant=True), + '_post_synaptic': Variable(Unit(1), + self.post_synaptic, constant=True), # Will be set in 
the template - 'i': Specifier('i'), - 'j': Specifier('j') + 'i': Variable(unit=Unit(1), constant=True), + 'j': Variable(unit=Unit(1), constant=True) } codeobj = create_runner_codeobj(self.synapses, abstract_code, 'synapses_create', - {}, - additional_specifiers=specifiers, + additional_variables=variables, additional_namespace=additional_namespace, - check_units=False, - codeobj_class=self.synapses.codeobj_class, + check_units=False ) codeobj() number = len(self.synaptic_pre) @@ -501,28 +501,20 @@ def __getitem__(self, index): elif isinstance(index, basestring): # interpret the string expression identifiers = get_identifiers(index) - specifiers = dict(self.specifiers) + variables = dict(self.variables) if 'k' in identifiers: synapse_numbers = _synapse_numbers(self.synaptic_pre[:], self.synaptic_post[:]) - specifiers['k'] = ArrayVariable('k', Unit(1), np.int32, - synapse_numbers, - '_neuron_idx') - # Get the locals and globals from the stack frame - frame = inspect.stack()[2][0] - namespace = dict(frame.f_globals) - namespace.update(frame.f_locals) + variables['k'] = ArrayVariable('k', Unit(1), + synapse_numbers) + namespace = get_local_namespace(1) additional_namespace = ('implicit-namespace', namespace) - indices = {'_neuron_idx': Index('_neuron_idx', iterate_all=True)} abstract_code = '_cond = ' + index codeobj = create_runner_codeobj(self.synapses, abstract_code, 'state_variable_indexing', - indices, - additional_specifiers=specifiers, + additional_variables=variables, additional_namespace=additional_namespace, - check_units=False, - codeobj_class=self.synapses.codeobj_class, ) result = codeobj() @@ -603,11 +595,6 @@ def __init__(self, source, target=None, model=None, pre=None, post=None, self.codeobj_class = codeobj_class - if not hasattr(source, 'spikes') and hasattr(source, 'clock'): - raise TypeError(('Source has to be a SpikeSource with spikes and' - ' clock attribute. Is type %r instead') - % type(source)) - self.source = weakref.proxy(source) if target is None: self.target = self.source @@ -661,14 +648,17 @@ def __init__(self, source, target=None, model=None, pre=None, post=None, self._queues = {} self._delays = {} - self.indices = SynapticIndices(self) + self.item_mapping = SynapticItemMapping(self) + self.indices = {'_idx': self.item_mapping, + '_presynaptic_idx': self.item_mapping.synaptic_pre, + '_postsynaptic_idx': self.item_mapping.synaptic_post} # Allow S.i instead of S.indices.i, etc. - self.i = self.indices.i - self.j = self.indices.j - self.k = self.indices.k + self.i = self.item_mapping.i + self.j = self.item_mapping.j + self.k = self.item_mapping.k - # Setup specifiers - self.specifiers = self._create_specifiers() + # Setup variables + self.variables = self._create_variables() #: List of names of all updaters, e.g. 
['pre', 'post'] self._updaters = [] @@ -690,11 +680,7 @@ def __init__(self, source, target=None, model=None, pre=None, post=None, # direct access to its delay via a delay attribute (instead of having # to use pre.delay) if 'pre' in self._updaters: - self.specifiers['delay'] = SynapticArrayVariable('delay', second, - np.float64, - self.pre._delays, - '_neuron_idx', - self) + self.variables['delay'] = self.pre.variables['delay'] if delay is not None: if isinstance(delay, Quantity): @@ -723,17 +709,16 @@ def __init__(self, source, target=None, model=None, pre=None, post=None, 'specified in units ' 'of seconds')) updater = getattr(self, pathway) - self.indices.unregister_variable(updater._delays) + self.item_mapping.unregister_variable(updater._delays) del updater._delays # For simplicity, store the delay as a one-element array # so that for example updater._delays[:] works. updater._delays = np.array([float(pathway_delay)]) - specifier = ArrayVariable('delay', second, np.float64, - updater._delays, '_neuron_idx', - group=self, scalar=True) - updater.specifiers['delay'] = specifier + variable = ArrayVariable('delay', second, updater._delays, + group_name=self.name, scalar=True) + updater.variables['delay'] = variable if pathway == 'pre': - self.specifiers['delay'] = specifier + self.variables['delay'] = variable #: Performs numerical integration step self.state_updater = StateUpdater(self, method) @@ -747,12 +732,12 @@ def __init__(self, source, target=None, model=None, pre=None, post=None, varname = single_equation.varname # For a lumped variable, we need an equivalent parameter in the # target group - if not varname in self.target.specifiers: + if not varname in self.target.variables: raise ValueError(('The lumped variable %s needs a variable ' 'of the same name in the target ' 'group ') % single_equation.varname) - fail_for_dimension_mismatch(self.specifiers[varname].unit, - self.target.specifiers[varname], + fail_for_dimension_mismatch(self.variables[varname].unit, + self.target.variables[varname], ('Lumped variables need to have ' 'the same units in Synapses ' 'and the target group')) @@ -773,7 +758,7 @@ def __init__(self, source, target=None, model=None, pre=None, post=None, # Activate name attribute access Group.__init__(self) - N = property(fget=lambda self: self.indices.N, + N = property(fget=lambda self: self.item_mapping.N, doc='Total number of synapses') def __len__(self): @@ -805,6 +790,19 @@ def _add_updater(self, code, prepost, objname=None): given (and did not end in a wildcard character). ''' + if prepost == 'pre': + spike_group, group_name = self.source, 'Source' + elif prepost == 'post': + spike_group, group_name = self.target, 'Target' + else: + raise ValueError(('"prepost" argument has to be "pre" or "post", ' + 'is "%s".') % prepost) + + if not hasattr(spike_group, 'spikes') and hasattr(spike_group, 'clock'): + raise TypeError(('%s has to be a SpikeSource with spikes and' + ' clock attribute. Is type %r instead') + % (group_name, type(spike_group))) + updater = SynapticPathway(self, code, prepost, objname) objname = updater.objname if hasattr(self, objname): @@ -817,65 +815,53 @@ def _add_updater(self, code, prepost, objname=None): self.contained_objects.append(updater) return objname - def _create_specifiers(self): + def _create_variables(self): ''' - Create the specifiers dictionary for this `NeuronGroup`, containing + Create the variables dictionary for this `Synapses`, containing entries for the equation variables and some standard entries.
''' - # Add all the pre and post specifiers with _pre and _post suffixes - s = {} - for name, spec in getattr(self.source, 'specifiers', {}).iteritems(): - if isinstance(spec, ArrayVariable): - new_spec = ArrayVariable(spec.name, spec.unit, spec.dtype, - spec.array, '_presynaptic_idx', - self.source) - s[name + '_pre'] = new_spec - for name, spec in getattr(self.target, 'specifiers', {}).iteritems(): - if isinstance(spec, ArrayVariable): - new_spec = ArrayVariable(spec.name, spec.unit, spec.dtype, - spec.array, '_postsynaptic_idx', self.target) - s[name + '_post'] = new_spec - # Also add all the post specifiers without a suffix -- if this + # Add all the pre and post variables with _pre and _post suffixes + v = {} + self.variable_indices = defaultdict(lambda: '_idx') + for name, var in getattr(self.source, 'variables', {}).iteritems(): + if isinstance(var, (ArrayVariable, Subexpression)): + v[name + '_pre'] = var + self.variable_indices[name + '_pre'] = '_presynaptic_idx' + for name, var in getattr(self.target, 'variables', {}).iteritems(): + if isinstance(var, (ArrayVariable, Subexpression)): + v[name + '_post'] = var + self.variable_indices[name + '_post'] = '_postsynaptic_idx' + # Also add all the post variables without a suffix -- if this # clashes with the name of a state variable defined in this # Synapses group, the latter will overwrite the entry later and # take precedence - s[name] = new_spec - - # Standard specifiers always present - s.update({'t': AttributeValue('t', second, np.float64, - self.clock, 't_'), - 'dt': AttributeValue('dt', second, np.float64, - self.clock, 'dt_', constant=True), - '_num_neurons': AttributeValue('_num_neurons', Unit(1), - np.int, self, 'N', - constant=True), - '_num_source_neurons':ReadOnlyValue('_num_source_neurons', Unit(1), - np.int32, - len(self.source)), - '_num_target_neurons':ReadOnlyValue('_num_target_neurons', Unit(1), - np.int32, - len(self.target)), + v[name] = var + self.variable_indices[name] = '_postsynaptic_idx' + + # Standard variables always present + v.update({'t': AttributeVariable(second, self.clock, 't_', + constant=False), + 'dt': AttributeVariable(second, self.clock, 'dt_', + constant=True), + '_num_source_neurons': Variable(Unit(1), len(self.source), + constant=True), + '_num_target_neurons': Variable(Unit(1), len(self.target), + constant=True), '_synaptic_pre': DynamicArrayVariable('_synaptic_pre', Unit(1), - np.int32, - self.indices.synaptic_pre, - '_neuron_idx'), + self.item_mapping.synaptic_pre), '_synaptic_post': DynamicArrayVariable('_synaptic_pre', Unit(1), - np.int32, - self.indices.synaptic_post, - '_neuron_idx'), + self.item_mapping.synaptic_post), # We don't need "proper" specifier for these -- they go # back to Python code currently - '_pre_synaptic': ReadOnlyValue('_pre_synaptic', Unit(1), - np.int32, - self.indices.pre_synaptic), - '_post_synaptic': ReadOnlyValue('_post_synaptic', Unit(1), - np.int32, - self.indices.post_synaptic)}) + '_pre_synaptic': Variable(Unit(1), self.item_mapping.pre_synaptic), + '_post_synaptic': Variable(Unit(1), + self.item_mapping.post_synaptic)}) for eq in itertools.chain(self.equations.itervalues(), - self.event_driven.itervalues() if self.event_driven is not None else []): + self.event_driven.itervalues() + if self.event_driven is not None else []): if eq.type in (DIFFERENTIAL_EQUATION, PARAMETER): array = self.arrays[eq.varname] constant = ('constant' in eq.flags) @@ -883,30 +869,34 @@ def _create_specifiers(self): # shouldn't directly access the specifier.array attribute but # 
use specifier.get_value() to get a reference to the underlying # array - s.update({eq.varname: SynapticArrayVariable(eq.varname, - eq.unit, - array.dtype, - array, - '_neuron_idx', - self, - constant=constant, - is_bool=eq.is_bool)}) - + v[eq.varname] = DynamicArrayVariable(eq.varname, + eq.unit, + array, + group_name=self.name, + constant=constant, + is_bool=eq.is_bool) + if eq.varname in self.variable_indices: + # we are overwriting a postsynaptic variable of the same + # name, delete the reference to the postsynaptic index + del self.variable_indices[eq.varname] + # Register the array with the `SynapticItemMapping` object so + # it gets automatically resized + self.item_mapping.register_variable(array) elif eq.type == STATIC_EQUATION: - s.update({eq.varname: Subexpression(eq.varname, eq.unit, + v.update({eq.varname: Subexpression(eq.unit, brian_prefs['core.default_scalar_dtype'], str(eq.expr), - s, - self.namespace, + variables=v, + namespace=self.namespace, is_bool=eq.is_bool)}) else: raise AssertionError('Unknown type of equation: ' + eq.eq_type) # Stochastic variables for xi in self.equations.stochastic_variables: - s.update({xi: StochasticVariable(xi)}) + v.update({xi: StochasticVariable()}) - return s + return v def _allocate_memory(self, dtype=None): # Allocate memory (TODO: this should be refactored somewhere at some point) @@ -926,9 +916,7 @@ def _allocate_memory(self, dtype=None): curdtype = dtype if curdtype is None: curdtype = brian_prefs['core.default_scalar_dtype'] - arrays[name] = DynamicArray1D(0, use_numpy_resize=True, - dtype=curdtype, - refcheck=False) + arrays[name] = DynamicArray1D(0, dtype=curdtype) logger.debug("NeuronGroup memory allocated successfully.") return arrays @@ -968,7 +956,7 @@ def connect(self, pre_or_cond, post=None, p=1., n=1, level=0): boolean value. If it is an index, then also `post` has to be given. post_neurons : {int, ndarray of int), optional - Indices of neurons from the target group. Non-optional if one or + Indices of neurons from the target group. Non-optional if one or more presynaptic indices have been given. p : float, optional The probability to create `n` synapses wherever the condition @@ -1002,15 +990,14 @@ def connect(self, pre_or_cond, post=None, p=1., n=1, level=0): raise TypeError(('Presynaptic indices can only be combined ' 'with postsynaptic integer indices))')) if isinstance(n, basestring): - raise TypeError(('Indices cannot be combined with a string' + raise TypeError(('Indices cannot be combined with a string ' 'expression for n. Either use an array/scalar ' 'for n, or a string expression for the ' 'connections')) i, j, n = np.broadcast_arrays(pre_or_cond, post, n) if i.ndim > 1: raise ValueError('Can only use 1-dimensional indices') - - self.indices._add_synapses(i, j, n, p, level=level+1) + self.item_mapping._add_synapses(i, j, n, p, level=level+1) elif isinstance(pre_or_cond, (basestring, bool)): if pre_or_cond is False: return # nothing to do... @@ -1026,8 +1013,8 @@ def connect(self, pre_or_cond, post=None, p=1., n=1, level=0): if not isinstance(p, (float, basestring)): raise TypeError('p has to be a float or a string evaluating ' 'to an float, is type %s instead.'
% type(n)) - self.indices._add_synapses(None, None, n, p, condition=pre_or_cond, - level=level+1) + self.item_mapping._add_synapses(None, None, n, p, condition=pre_or_cond, + level=level+1) else: raise TypeError(('First argument has to be an index or a ' 'string, is %s instead.') % type(pre_or_cond)) diff --git a/brian2/tests/test_codegen.py b/brian2/tests/test_codegen.py index 6a05bdfa5..b76c596cf 100644 --- a/brian2/tests/test_codegen.py +++ b/brian2/tests/test_codegen.py @@ -1,7 +1,7 @@ import numpy as np from brian2.codegen.translation import analyse_identifiers, get_identifiers_recursively -from brian2.core.specifiers import Subexpression, Specifier +from brian2.core.variables import Subexpression, Variable from brian2.units.fundamentalunits import Unit @@ -26,13 +26,13 @@ def test_get_identifiers_recursively(): ''' Test finding identifiers including subexpressions. ''' - specifiers = {} - specifiers['sub1'] = Subexpression('sub1', Unit(1), np.float32, 'sub2 * z', - specifiers, {}) - specifiers['sub2'] = Subexpression('sub2', Unit(1), np.float32, '5 + y', - specifiers, {}) - specifiers['x'] = Specifier('x') - identifiers = get_identifiers_recursively('_x = sub1 + x', specifiers) + variables = {} + variables['sub1'] = Subexpression(Unit(1), np.float32, 'sub2 * z', + variables, {}) + variables['sub2'] = Subexpression(Unit(1), np.float32, '5 + y', + variables, {}) + variables['x'] = Variable(unit=None) + identifiers = get_identifiers_recursively('_x = sub1 + x', variables) assert identifiers == set(['x', '_x', 'y', 'z', 'sub1', 'sub2']) if __name__ == '__main__': diff --git a/brian2/tests/test_equations.py b/brian2/tests/test_equations.py index a89c80c93..9bf0b94f8 100644 --- a/brian2/tests/test_equations.py +++ b/brian2/tests/test_equations.py @@ -249,7 +249,7 @@ def test_construction_errors(): x = 3 * w : 1''')) def test_unit_checking(): - # dummy Specifier class + # dummy Variable class class S(object): def __init__(self, unit): self.unit = unit diff --git a/brian2/tests/test_functions.py b/brian2/tests/test_functions.py index 12a9e64f3..f45de321d 100644 --- a/brian2/tests/test_functions.py +++ b/brian2/tests/test_functions.py @@ -40,10 +40,11 @@ def test_math_functions(): else: func_name = func.__name__ G = NeuronGroup(len(test_array), - '''func = {func}(test_array) : 1'''.format(func=func_name), + '''func = {func}(variable) : 1 + variable : 1'''.format(func=func_name), clock=clock, codeobj_class=codeobj_class) - #G.variable = test_array + G.variable = test_array mon = StateMonitor(G, 'func', record=True) net = Network(G, mon) net.run(clock.dt) @@ -53,7 +54,9 @@ def test_math_functions(): # Functions/operators scalar = 3 - for func, operator in [(np.power, '**'), (np.mod, '%')]: + # TODO: We are not testing the modulo operator here since it does + # not work for double values in C + for func, operator in [(np.power, '**')]: # Calculate the result directly numpy_result = func(test_array, scalar) @@ -62,10 +65,11 @@ def test_math_functions(): # static equation in a NeuronGroup clock = Clock() G = NeuronGroup(len(test_array), - '''func = test_array {op} scalar : 1'''.format(op=operator), + '''func = variable {op} scalar : 1 + variable : 1'''.format(op=operator), clock=clock, codeobj_class=codeobj_class) - #G.variable = test_array + G.variable = test_array mon = StateMonitor(G, 'func', record=True) net = Network(G, mon) net.run(clock.dt) diff --git a/brian2/tests/test_namespaces.py b/brian2/tests/test_namespaces.py index 3e338a96c..c92d55c7b 100644 --- a/brian2/tests/test_namespaces.py 
+++ b/brian2/tests/test_namespaces.py @@ -24,7 +24,7 @@ def test_default_content(): assert namespace['ms'] == ms assert namespace['Hz'] == Hz assert namespace['mV'] == mV - # Functions (the namespace contains specifiers) + # Functions (the namespace contains variables) assert namespace['sin'].pyfunc == sin assert namespace['log'].pyfunc == log assert namespace['exp'].pyfunc == exp diff --git a/brian2/tests/test_neurongroup.py b/brian2/tests/test_neurongroup.py index e4c2ebed3..8df2706dc 100644 --- a/brian2/tests/test_neurongroup.py +++ b/brian2/tests/test_neurongroup.py @@ -4,8 +4,9 @@ from brian2.groups.neurongroup import NeuronGroup from brian2.core.network import Network from brian2.core.clocks import defaultclock -from brian2.units.fundamentalunits import DimensionMismatchError -from brian2.units.allunits import second +from brian2.units.fundamentalunits import (DimensionMismatchError, + have_same_dimensions) +from brian2.units.allunits import second, volt from brian2.units.stdunits import ms, mV from brian2.codegen.runtime.weave_rt import WeaveCodeObject from brian2.codegen.runtime.numpy_rt import NumpyCodeObject @@ -43,15 +44,15 @@ def test_creation(): assert_raises(TypeError, lambda: NeuronGroup(1, object())) -def test_specifiers(): +def test_variables(): ''' - Test the correct creation of the specifiers dictionary. + Test the correct creation of the variables dictionary. ''' G = NeuronGroup(1, 'dv/dt = -v/(10*ms) : 1') - assert 'v' in G.specifiers and 't' in G.specifiers and 'dt' in G.specifiers + assert 'v' in G.variables and 't' in G.variables and 'dt' in G.variables G = NeuronGroup(1, 'dv/dt = -v/tau + xi*tau**-0.5: 1') - assert not 'tau' in G.specifiers and 'xi' in G.specifiers + assert not 'tau' in G.variables and 'xi' in G.variables def test_stochastic_variable(): @@ -185,7 +186,6 @@ def test_syntax_errors(): # We do not specify the exact type of exception here: Python throws a # SyntaxError while C++ results in a ValueError - for codeobj_class in codeobj_classes: # Syntax error in threshold @@ -232,13 +232,42 @@ def test_state_variables(): assert_raises(DimensionMismatchError, lambda: G.v.__iadd__(3*second)) assert_raises(DimensionMismatchError, lambda: G.v.__iadd__(3)) assert_raises(DimensionMismatchError, lambda: G.v.__imul__(3*second)) - # and even with strings - G.v -= 'i*mV' + + +def test_state_variable_access(): + G = NeuronGroup(10, 'v:volt') + G.v = np.arange(10) * volt + + assert_equal(np.asarray(G.v[:]), np.arange(10)) + assert have_same_dimensions(G.v[:], volt) + assert_equal(np.asarray(G.v[:]), G.v_[:]) + # Accessing single elements, slices and arrays + assert G.v[5] == 5 * volt + assert G.v_[5] == 5 + assert_equal(G.v[:5], np.arange(5) * volt) + assert_equal(G.v_[:5], np.arange(5)) + assert_equal(G.v[[0, 5]], [0, 5] * volt) + assert_equal(G.v_[[0, 5]], np.array([0, 5])) + + # Illegal indexing + assert_raises(IndexError, lambda: G.v[0, 0]) + assert_raises(IndexError, lambda: G.v_[0, 0]) + assert_raises(TypeError, lambda: G.v[object()]) + assert_raises(TypeError, lambda: G.v_[object()]) + + # Indexing with strings + assert G.v['i==2'] == G.v[2] + assert G.v_['i==2'] == G.v_[2] + assert_equal(G.v['v >= 3*volt'], G.v[3:]) + assert_equal(G.v_['v >= 3*volt'], G.v_[3:]) + # Should also check for units + assert_raises(DimensionMismatchError, lambda: G.v['v >= 3']) + assert_raises(DimensionMismatchError, lambda: G.v['v >= 3*second']) if __name__ == '__main__': test_creation() - test_specifiers() + test_variables() test_stochastic_variable() test_unit_errors() 
test_threshold_reset() @@ -246,4 +275,5 @@ def test_state_variables(): test_incomplete_namespace() test_namespace_errors() test_syntax_errors() - test_state_variables() \ No newline at end of file + test_state_variables() + test_state_variable_access() \ No newline at end of file diff --git a/brian2/tests/test_parsing.py b/brian2/tests/test_parsing.py index 15644392f..306896028 100644 --- a/brian2/tests/test_parsing.py +++ b/brian2/tests/test_parsing.py @@ -170,8 +170,8 @@ def test_abstract_code_dependencies(): def test_is_boolean_expression(): - # dummy "specifier" class - Spec = namedtuple("Spec", ['is_bool']) + # dummy "Variable" class + Var = namedtuple("Var", ['is_bool']) # dummy function object class Func(object): @@ -185,12 +185,12 @@ def __init__(self, returns_bool=False): f = Func(returns_bool=True) g = Func(returns_bool=False) - # specifier - s1 = Spec(is_bool=True) - s2 = Spec(is_bool=False) + # variables + s1 = Var(is_bool=True) + s2 = Var(is_bool=False) namespace = {'a': a, 'b': b, 'c': c, 'f': f, 'g': g} - specifiers = {'s1': s1, 's2': s2} + variables = {'s1': s1, 's2': s2} EVF = [ (True, 'a or b'), @@ -208,20 +208,20 @@ def __init__(self, returns_bool=False): (True, 'f(c) or a automatic choice no longer works StateUpdateMethod.stateupdaters = {} - assert_raises(ValueError, lambda: determine_stateupdater(eqs, specifiers)) + assert_raises(ValueError, lambda: determine_stateupdater(eqs, variables)) # reset to state before the test StateUpdateMethod.stateupdaters = before diff --git a/brian2/tests/test_subgroup.py b/brian2/tests/test_subgroup.py new file mode 100644 index 000000000..cddee65ed --- /dev/null +++ b/brian2/tests/test_subgroup.py @@ -0,0 +1,164 @@ +import numpy as np +from numpy.testing.utils import assert_raises, assert_equal, assert_allclose + +from brian2 import * + +# We can only test C++ if weave is availabe +try: + import scipy.weave + codeobj_classes = [NumpyCodeObject, WeaveCodeObject] +except ImportError: + # Can't test C++ + codeobj_classes = [NumpyCodeObject] + + +def test_state_variables(): + ''' + Test the setting and accessing of state variables in subgroups. 
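The new subgroup tests below rely on the offset-based indexing that `Subgroup` now gets from `Group`. A toy sketch of the idea, assuming only that subgroup reads and writes are redirected to a slice of the parent group's state array (the class and variable names are made up)::

    import numpy as np

    class SubgroupView(object):
        """Toy stand-in, not Brian code: reads and writes go to a
        slice of the parent group's array."""
        def __init__(self, parent_array, start, end):
            self.parent = parent_array
            self.start, self.end = start, end

        def __getitem__(self, item):
            return self.parent[self.start:self.end][item]

        def __setitem__(self, item, value):
            self.parent[self.start:self.end][item] = value

    v = np.zeros(10)              # the parent group's array, like G.v_
    sg_v = SubgroupView(v, 4, 9)  # roughly what G[4:9] exposes
    sg_v[:] = -80.0
    print(v)   # elements 4..8 are -80, the rest stay 0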
+ ''' + for codeobj_class in codeobj_classes: + G = NeuronGroup(10, 'v : volt', codeobj_class=codeobj_class) + SG = G[4:9] + assert_raises(DimensionMismatchError, lambda: SG.__setattr__('v', -70)) + SG.v_ = float(-80*mV) + assert_allclose(G.v, + np.array([0, 0, 0, 0, -80, -80, -80, -80, -80, 0])*mV) + assert_allclose(SG.v, + np.array([-80, -80, -80, -80, -80])*mV) + assert_allclose(G.v_, + np.array([0, 0, 0, 0, -80, -80, -80, -80, -80, 0])*mV) + assert_allclose(SG.v_, + np.array([-80, -80, -80, -80, -80])*mV) + # You should also be able to set variables with a string + SG.v = 'v + i*mV' + assert_allclose(SG.v[0], -80*mV) + assert_allclose(SG.v[4], -76*mV) + assert_allclose(G.v[4:9], -80*mV + np.arange(5)*mV) + + # Calculating with state variables should work too + assert all(G.v[4:9] - SG.v == 0) + + # And in-place modification should work as well + SG.v += 10*mV + assert_allclose(G.v[4:9], -70*mV + np.arange(5)*mV) + SG.v *= 2 + assert_allclose(G.v[4:9], 2*(-70*mV + np.arange(5)*mV)) + # with unit checking + assert_raises(DimensionMismatchError, lambda: SG.v.__iadd__(3*second)) + assert_raises(DimensionMismatchError, lambda: SG.v.__iadd__(3)) + assert_raises(DimensionMismatchError, lambda: SG.v.__imul__(3*second)) + + +def test_state_monitor(): + for codeobj_class in codeobj_classes: + G = NeuronGroup(10, 'v : volt', codeobj_class=codeobj_class) + G.v = np.arange(10) * volt + SG = G[5:] + mon_all = StateMonitor(SG, 'v', record=True) + mon_0 = StateMonitor(SG, 'v', record=0) + net = Network(G, SG, mon_all, mon_0) + net.run(defaultclock.dt) + + assert_equal(mon_0[0].v, mon_all[0].v) + assert_equal(mon_0[0].v, np.array([5]) * volt) + assert_equal(mon_all.v.flatten(), np.arange(5, 10) * volt) + + assert_raises(IndexError, lambda: mon_all[5]) + + +def test_synapse_creation(): + for codeobj_class in codeobj_classes: + G1 = NeuronGroup(10, 'v:1', codeobj_class=codeobj_class) + G2 = NeuronGroup(20, 'v:1', codeobj_class=codeobj_class) + SG1 = G1[:5] + SG2 = G2[10:] + S = Synapses(SG1, SG2, 'w:1', pre='v+=w', codeobj_class=codeobj_class) + S.connect(2, 2) # Should correspond to (2, 12) + S.connect('i==4 and j==5') # Should correspond to (4, 15) + + # Only relative numbers are stored + assert_equal(S.item_mapping.synaptic_pre, np.array([2, 4])) + assert_equal(S.item_mapping.synaptic_post, np.array([2, 5])) + assert_equal(S.i[:], np.array([2, 4])) + assert_equal(S.j[:], np.array([2, 5])) + + +def test_synapse_access(): + for codeobj_class in codeobj_classes: + G1 = NeuronGroup(10, 'v:1', codeobj_class=codeobj_class) + G2 = NeuronGroup(20, 'v:1', codeobj_class=codeobj_class) + SG1 = G1[:5] + SG2 = G2[10:] + S = Synapses(SG1, SG2, 'w:1', pre='v+=w', codeobj_class=codeobj_class) + S.connect(True) + S.w['j == 0'] = 5 + assert all(S.w['j==0'] == 5) + S.w[2, 2] = 7 + assert all(S.w['i==2 and j==2'] == 7) + S.w = '2*j' + assert all(S.w[:, 1] == 2) + + assert len(S.w[:, 10]) == 0 + assert len(S.w['j==10']) == 0 + + +def test_synaptic_propagation(): + for codeobj_class in codeobj_classes: + G1 = NeuronGroup(10, 'v:1', threshold='v>1', reset='v=0', + codeobj_class=codeobj_class) + G1.v[1::2] = 1.1 # odd numbers should spike + G2 = NeuronGroup(20, 'v:1', codeobj_class=codeobj_class) + SG1 = G1[1:6] + SG2 = G2[10:] + S = Synapses(SG1, SG2, pre='v+=1', codeobj_class=codeobj_class) + S.connect('i==j') + net = Network(G1, G2, S) + net.run(defaultclock.dt) + expected = np.zeros(len(G2)) + # Neurons 1, 3, 5 spiked and are connected to 10, 12, 14 + expected[[10, 12, 14]] = 1 + assert_equal(np.asarray(G2.v).flatten(), 
expected) + + +def test_spike_monitor(): + for codeobj_class in codeobj_classes: + G = NeuronGroup(10, 'v:1', threshold='v>1', reset='v=0', + codeobj_class=codeobj_class) + G.v[0] = 1.1 + G.v[2] = 1.1 + G.v[5] = 1.1 + SG = G[3:] + s_mon = SpikeMonitor(G, codeobj_class=codeobj_class) + sub_s_mon = SpikeMonitor(SG, codeobj_class=codeobj_class) + net = Network(G, s_mon, sub_s_mon) + net.run(defaultclock.dt) + assert_equal(s_mon.i, np.array([0, 2, 5])) + assert_equal(s_mon.t_, np.zeros(3)) + assert_equal(sub_s_mon.i, np.array([2])) + assert_equal(sub_s_mon.t_, np.zeros(1)) + expected = np.zeros(10, dtype=int) + expected[[0, 2, 5]] = 1 + assert_equal(s_mon.count, expected) + expected = np.zeros(7, dtype=int) + expected[[2]] = 1 + assert_equal(sub_s_mon.count, expected) + + +def test_wrong_indexing(): + G = NeuronGroup(10, 'v:1') + assert_raises(TypeError, lambda: G[0]) + assert_raises(TypeError, lambda: G[[0, 1]]) + assert_raises(TypeError, lambda: G['string']) + + assert_raises(IndexError, lambda: G[10:]) + assert_raises(IndexError, lambda: G[::2]) + assert_raises(IndexError, lambda: G[3:2]) + +if __name__ == '__main__': + test_state_variables() + test_state_monitor() + test_synapse_creation() + test_synapse_access() + test_synaptic_propagation() + test_spike_monitor() + test_wrong_indexing() \ No newline at end of file diff --git a/docs_sphinx/developer/codegen.rst b/docs_sphinx/developer/codegen.rst index 98d56d2a1..535485817 100644 --- a/docs_sphinx/developer/codegen.rst +++ b/docs_sphinx/developer/codegen.rst @@ -10,12 +10,20 @@ a single `NeuronGroup` object: - Parse the equations, add refractoriness to them: this isn't really part of code generation. + - Allocate memory for the state variables. + - Create a namespace object. + - Create `Thresholder`, `Resetter` and `StateUpdater` objects. - - Collect `Specifier` objects from the group and code template. - - Resolve the namespace, i.e. for hierarchical namespace choose just one value for each variable name. + + - Collect `Variable` objects from the group and code template. + + - Resolve the namespace, i.e. for hierarchical namespace choose just one + value for each variable name. + - Create a `CodeObject`. + - At runtime, each object calls `CodeObject.__call__` to execute the code. Stages of code generation @@ -109,7 +117,7 @@ language. This is handled in ``brian2.codegen.templates``. An example of a template for Python thresholding:: - # USE_SPECIFIERS { not_refractory, lastspike, t } + # USES_VARIABLES { not_refractory, lastspike, t } {% for line in code_lines %} {{line}} {% endfor %} @@ -120,7 +128,7 @@ An example of a template for Python thresholding:: and the output code from the example equations above:: - # USE_SPECIFIERS { not_refractory, lastspike, t } + # USES_VARIABLES { not_refractory, lastspike, t } v = _array_neurongroup_v _cond = v > 10 * mV _return_values, = _cond.nonzero() @@ -162,15 +170,15 @@ Namespace resolution means creating a simple name to value mapping from a nested hierarchy, i.e. selecting which value to use in the case of multiple possibilities, and removing the units. See :doc:`equations_namespaces` for more details. -Specifiers +Variable ---------- -Specifiers are objects derived from `Specifier`, and contain information about the variable -they correspond to, including details like the name, the data type, whether it is a single value +`Variable` objects contain information about the variable +they correspond to, including details like the data type, whether it is a single value or an array, etc. 
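As a concrete illustration of the paragraph above, a stripped-down stand-in for such an object might look as follows; this sketch is for the docs discussion only and deliberately ignores units handling and the real class hierarchy::

    import numpy as np

    class VariableSketch(object):
        """Metadata plus access to the underlying storage, in the
        spirit of the Variable/ArrayVariable classes."""
        def __init__(self, unit, value, constant=False, scalar=False):
            self.unit = unit
            self.value = np.asarray(value)
            self.constant = constant
            self.scalar = scalar
            self.dtype = self.value.dtype

        def get_value(self):
            return self.value

        def get_len(self):
            return len(self.value)

    variables = {'v': VariableSketch('volt', np.zeros(100))}
    print(variables['v'].dtype, variables['v'].get_len())   # float64 100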
-See ``brian2.core.specifiers`` and, e.g. `Group._create_specifiers`, -`NeuronGroup._create_specifiers`. +See ``brian2.core.variables`` and, e.g. `Group._create_variables`, +`NeuronGroup._create_variables`. Templates --------- diff --git a/docs_sphinx/developer/equations_namespaces.rst b/docs_sphinx/developer/equations_namespaces.rst index c9e7abde6..b5759a7f2 100644 --- a/docs_sphinx/developer/equations_namespaces.rst +++ b/docs_sphinx/developer/equations_namespaces.rst @@ -14,7 +14,7 @@ Specifiers Each Brian object that saves state variables (e.g. `NeuronGroup`, `Synapses`, `StateMonitor`) has a ``specifiers`` attribute, a dictionary mapping variable -names to `Specifier` objects. `Specifier` objects contain information *about* +names to `Variable` objects. `Variable` objects contain information *about* the variable (name, dtype, units) as well as access to the variable's value via a ``get_value`` method. Some will also allow setting the values via a corresponding ``set_value`` method. These objects can therefore act as proxies diff --git a/docs_sphinx/developer/overview.rst b/docs_sphinx/developer/overview.rst index 42ee4b6ef..765cd358d 100644 --- a/docs_sphinx/developer/overview.rst +++ b/docs_sphinx/developer/overview.rst @@ -52,32 +52,52 @@ units (because this requires knowledge of the external namespace, or the pre- and postsynaptic groups in the case of synapses) or correct flags (because they differ between `NeuronGroup` and `Synapses`, for example). -Specifiers and namespaces +Variables and namespaces ------------------------- Objects referring to variables and functions, in particular `NeuronGroup` and `Synapses` have two dictionary-like attributes: ``namespace`` -and ``specifiers``. The *namespace* is related to everything external to the +and ``variables``. The *namespace* is related to everything external to the model itself, i.e. variables and functions defined outside of the model equations. It by default consists of a set of standard units and functions and optionally of an explicitly given namespace. If no namespace is given explicitly, the namespace used for running code will be filled at the time of a run with either the namespace provided to the run function or the -locals/globals surrounding the run call. *Specifiers* on the other hand, +locals/globals surrounding the run call. *Variables* on the other hand, define everything *internal* to the model, the objects in this dictionary -inherit from `Specifier` and -- in addition to specifying things like the units +inherit from `Variable` and -- in addition to specifying things like the units -- act as proxy objects, connecting for example state variable names to the numpy arrays where the values are actually stored. This indirection will be useful when dealing with memory on devices. The -specifiers also offer an explicit and simple way to implement linked variables +variables also offer an explicit and simple way to implement linked variables or the access to pre- and postsynaptic variables in `Synapses`: To link the symbol ``v_post`` to the postsynaptic membrane potentials, the specifier -dictionary just has to contain a reference to the respective `Specifier` object +dictionary just has to contain a reference to the respective `Variable` object of the target group under the key ``v_post``. Another parent class of `NeuronGroup` is `Group`, which also relies on the -`Specifier` objects and exposes access to the state variables as attributes. -This is also used in classes such as `StateMonitor`. 
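The ``v_post`` mechanism described above boils down to sharing one object between two dictionaries and recording which index applies to it. A plain-dict sketch of that idea (the name ``variable_indices`` follows the patch, the rest is illustrative)::

    import numpy as np

    # the target group owns the storage for 'v' ...
    post_v = np.zeros(20)
    target_variables = {'v': post_v}

    # ... and the Synapses object re-exposes the very same object under
    # a suffixed name, recording separately which index applies to it
    syn_variables = {'v_post': target_variables['v']}
    variable_indices = {'v_post': '_postsynaptic_idx'}

    syn_variables['v_post'][3] += 1.0   # writes through to the target group
    print(post_v[3])                    # 1.0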
+`Variable` objects and exposes access to the state variables as attributes. + +`Variable` objects only exist once for every variable, e.g. the `Synapses` +class contains references to the `Variable` objects of the pre- and postsynaptic +classes. + +Indices +------- + +To handle arrays in generated code correctly, information about *indexing* has +to be stored as well. Every `Group` has two attributes responsible for that: +``indices``, a mapping from index names to index objects and ``variable_indices``, +a mapping from variable names to index names. An index object needs to have +a length and be indexable -- e.g. it could be a numpy array. + +For simple classes such as `NeuronGroup` that only have a single index for all +variables (``'_idx'``), nothing needs to be done. ``Group`` automatically +creates an ``indices`` attribute mapping ``'_idx'`` to the ``item_mapping`` +and a ``variable_indices`` attribute that maps everything to ``'_idx'``. + +TODO: Explain ``item_mapping`` + State Updaters -------------- diff --git a/examples/synapses_gapjunctions.py b/examples/synapses_gapjunctions.py index 02dc5b547..9950c3809 100755 --- a/examples/synapses_gapjunctions.py +++ b/examples/synapses_gapjunctions.py @@ -30,6 +30,6 @@ run(500*ms) -plt.plot(trace.t / ms, trace.v[:, 0]) -plt.plot(trace.t / ms, trace.v[:, 1]) +plt.plot(trace.t / ms, trace[0].v) +plt.plot(trace.t / ms, trace[5].v) plt.show() diff --git a/examples/synapses_nonlinear.py b/examples/synapses_nonlinear.py index 7259e3480..58e19fd03 100755 --- a/examples/synapses_nonlinear.py +++ b/examples/synapses_nonlinear.py @@ -30,7 +30,7 @@ import matplotlib.pyplot as plt plt.subplot(2, 1, 1) -plt.plot(M.t / ms, M.g) +plt.plot(M.t / ms, M.g.T) plt.subplot(2, 1, 2) -plt.plot(Mn.t / ms, Mn.g) +plt.plot(Mn.t / ms, Mn[0].g) plt.show() \ No newline at end of file diff --git a/examples/synapses_state_variables.py b/examples/synapses_state_variables.py index 7c16cbb80..138b25315 100644 --- a/examples/synapses_state_variables.py +++ b/examples/synapses_state_variables.py @@ -8,7 +8,8 @@ #brian_prefs.codegen.target = 'weave' G = NeuronGroup(100, 'v:volt') -G.v = '(sin(2*pi*i/_num_neurons) - 70 + 0.25*randn()) * mV' +N = len(G) +G.v = '(sin(2*pi*i/N) - 70 + 0.25*randn()) * mV' S = Synapses(G, G, 'w:volt', pre='v+=w') S.connect('True')
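The updated example above assigns a string expression to ``G.v`` and relies on names like ``N`` being resolved from the surrounding namespace. The semantics, ignoring units and code generation (Brian does not literally call ``eval``; this only shows the idea), are roughly::

    import numpy as np

    N = 100
    mV = 1e-3
    namespace = {'i': np.arange(N), 'N': N, 'pi': np.pi, 'sin': np.sin,
                 'randn': lambda: np.random.randn(N), 'mV': mV}
    v = eval('(sin(2*pi*i/N) - 70 + 0.25*randn()) * mV',
             {'__builtins__': {}}, namespace)
    print(v.shape)   # (100,)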