Implementation of LIF reset models in CPU backend #415

Merged · 41 commits · Oct 14, 2022

Commits
141e732
update refport unittest to always wait when it writes to port for con…
bamsumit Mar 8, 2022
7097dd8
Removed pyproject changes
bamsumit Mar 8, 2022
5234c1b
Merge branch 'lava-nc:main' into main
bamsumit Mar 8, 2022
2644255
Merge branch 'lava-nc:main' into main
bamsumit Mar 8, 2022
a3116bb
Merge branch 'lava-nc:main' into main
bamsumit Apr 20, 2022
31715fc
Merge branch 'lava-nc:main' into main
bamsumit Apr 21, 2022
f90595d
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Apr 21, 2022
c988ad8
Merge branch 'main' of github.com:bamsumit/lava into main
bamsumit Apr 21, 2022
be2ba52
Merge branch 'lava-nc:main' into main
bamsumit May 20, 2022
1fb7353
Fix to convolution tests. Fixed incompatible mnist_pretrained for old…
bamsumit May 20, 2022
4a58d97
merged with upstream
bamsumit Jul 15, 2022
d4636c3
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Jul 27, 2022
68c817c
Missing module parent fix
bamsumit Jul 28, 2022
1b59e58
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Jul 29, 2022
9099529
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Aug 1, 2022
fd5b813
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Aug 25, 2022
fd2163c
Added ConvVarModel
joyeshmishra Aug 26, 2022
2fb345c
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Aug 29, 2022
8e73642
Merge branch 'conv_manager' of github.com:lava-nc/lava into main
bamsumit Aug 29, 2022
c441098
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Sep 2, 2022
964c5ca
Merge branch 'main' into main
bamsumit Sep 16, 2022
8b2e8a2
Merged with lava-nc main
bamsumit Sep 19, 2022
4431f61
Merge branch 'main' of github.com:bamsumit/lava into main
bamsumit Sep 19, 2022
7e154fc
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Sep 27, 2022
c5b5509
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Sep 28, 2022
2241e23
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Sep 29, 2022
95545a2
Merge branch 'main' of github.com:lava-nc/lava into main
bamsumit Sep 29, 2022
83c5a6c
LIF reset python model implementation
bamsumit Oct 13, 2022
7b2c726
Merge branch 'main' into lif_reset
bamsumit Oct 13, 2022
2388636
Update ci-build.yml
PhilippPlank Oct 14, 2022
57e813f
Update ci-build.yml
PhilippPlank Oct 14, 2022
42c9356
Update ci-build.yml
PhilippPlank Oct 14, 2022
4412573
Update ci-build.yml
PhilippPlank Oct 14, 2022
d9c47ec
Update ci-build.yml
PhilippPlank Oct 14, 2022
4e727bb
Update ci-build.yml
PhilippPlank Oct 14, 2022
cc54299
Update ci-build.yml
PhilippPlank Oct 14, 2022
47e1fb7
Update ci-build.yml
PhilippPlank Oct 14, 2022
db49294
Update ci-build.yml
PhilippPlank Oct 14, 2022
550fbf9
Update ci-build.yml
PhilippPlank Oct 14, 2022
9ab2bbd
Update ci-build.yml
PhilippPlank Oct 14, 2022
c4eb67a
Update ci-build.yml
PhilippPlank Oct 14, 2022
6 changes: 4 additions & 2 deletions src/lava/proc/io/encoder.py
@@ -129,7 +129,8 @@ def __init__(self, proc_params: Optional[Dict] = None):

     def run_spk(self):
         self.s_out.send(self.s_out_buf)
-        a_in_data = np.left_shift(self.a_in.recv(), self.spike_exp)
+        a_in_data = np.left_shift(self.a_in.recv().astype(int),
+                                  self.spike_exp)
         self.s_out_buf = self.encode_delta(a_in_data)


@@ -235,7 +236,8 @@ def encode_delta_sparse_8(self, s_out):
     def run_spk(self):
         self.s_out.send(self.data, self.idx)
         # Receive synaptic input
-        a_in_data = np.left_shift(self.a_in.recv(), self.spike_exp)
+        a_in_data = np.left_shift(self.a_in.recv().astype(int),
+                                  self.spike_exp)
         s_out = self.encode_delta(a_in_data)
         if self.compression == Compression.SPARSE:
             self.data, self.idx = self.encode_sparse(s_out)
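Context for the .astype(int) change above: NumPy's left_shift is a bitwise ufunc defined only for integer types, so shifting a float payload returned by recv() raises a TypeError. A minimal standalone sketch of the failure mode and the fix (the array values are made up for illustration):

import numpy as np

payload = np.array([1.0, 2.0, 3.0])       # float payload, as a_in.recv() may deliver
spike_exp = 6                             # fixed-point scaling exponent

# np.left_shift(payload, spike_exp)      # TypeError: ufunc 'left_shift' not supported
shifted = np.left_shift(payload.astype(int), spike_exp)
print(shifted)                            # [ 64 128 192]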
109 changes: 108 additions & 1 deletion src/lava/proc/lif/models.py
@@ -8,7 +8,7 @@
from lava.magma.core.resources import CPU
from lava.magma.core.decorator import implements, requires, tag
from lava.magma.core.model.py.model import PyLoihiProcessModel
-from lava.proc.lif.process import LIF, TernaryLIF
+from lava.proc.lif.process import LIF, LIFReset, TernaryLIF


class AbstractPyLifModelFloat(PyLoihiProcessModel):
@@ -320,3 +320,110 @@ def reset_voltage(self, spike_vector: np.ndarray):
"""Reset voltage of all spiking neurons to 0.
"""
self.v[spike_vector != 0] = 0 # Reset voltage to 0 wherever we spiked


@implements(proc=LIFReset, protocol=LoihiProtocol)
@requires(CPU)
@tag('floating_pt')
class PyLifResetModelFloat(AbstractPyLifModelFloat):
"""Implementation of Leaky-Integrate-and-Fire neural process with reset
in floating point precision.
"""
s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float)
vth: float = LavaPyType(float, float)

def __init__(self, proc_params):
super(PyLifResetModelFloat, self).__init__(proc_params)
self.reset_interval = proc_params['reset_interval']
self.reset_offset = (proc_params['reset_offset']) % self.reset_interval

def spiking_activation(self):
"""Spiking activation function for LIF.
"""
return self.v > self.vth

def run_spk(self):
"""The run function that performs the actual computation during
execution orchestrated by a PyLoihiProcessModel using the
LoihiProtocol.
"""
# Receive synaptic input
a_in_data = self.a_in.recv()

if (self.time_step % self.reset_interval) == self.reset_offset:
self.u *= 0
self.v *= 0

self.subthr_dynamics(activation_in=a_in_data)

s_out = self.spiking_activation()

# Reset voltage of spiked neurons to 0
self.reset_voltage(spike_vector=s_out)
self.s_out.send(s_out)


@implements(proc=LIFReset, protocol=LoihiProtocol)
@requires(CPU)
@tag('bit_accurate_loihi', 'fixed_pt')
class PyLifResetModelBitAcc(AbstractPyLifModelFixed):
"""Implementation of Leaky-Integrate-and-Fire neural process with reset
bit-accurate with Loihi's hardware LIF dynamics, i.e., it mimics
Loihi behaviour.

Precisions of state variables

- du: unsigned 12-bit integer (0 to 4095)
- dv: unsigned 12-bit integer (0 to 4095)
- bias_mant: signed 13-bit integer (-4096 to 4095). Mantissa part of neuron
bias.
- bias_exp: unsigned 3-bit integer (0 to 7). Exponent part of neuron bias.
- vth: unsigned 17-bit integer (0 to 131071).

"""
s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32, precision=24)
vth: int = LavaPyType(int, np.int32, precision=17)

def __init__(self, proc_params):
super(PyLifResetModelBitAcc, self).__init__(proc_params)
self.effective_vth = 0
self.reset_interval = proc_params['reset_interval']
self.reset_offset = (proc_params['reset_offset']) % self.reset_interval

def scale_threshold(self):
"""Scale threshold according to the way Loihi hardware scales it. In
Loihi hardware, threshold is left-shifted by 6-bits to MSB-align it
with other state variables of higher precision.
"""
self.effective_vth = np.left_shift(self.vth, self.vth_shift)
self.isthrscaled = True

def spiking_activation(self):
"""Spike when voltage exceeds threshold.
"""
return self.v > self.effective_vth

def run_spk(self):
"""The run function that performs the actual computation during
execution orchestrated by a PyLoihiProcessModel using the
LoihiProtocol.
"""
# Receive synaptic input
a_in_data = self.a_in.recv()

if (self.time_step % self.reset_interval) == self.reset_offset:
self.u *= 0
self.v *= 0

self.scale_bias()

if not self.isthrscaled:
self.scale_threshold()

self.subthr_dynamics(activation_in=a_in_data)

s_out = self.spiking_activation()

# Reset voltage of spiked neurons to 0
self.reset_voltage(spike_vector=s_out)
self.s_out.send(s_out)
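Both reset models gate the reset on the same schedule: u and v are zeroed whenever time_step % reset_interval == reset_offset, before the subthreshold dynamics run. A standalone sketch of that schedule for the values used in the tests below, assuming time steps are counted from 1 as the ground-truth arrays there imply:

reset_interval = 4
reset_offset = 3 % reset_interval         # the constructors wrap the offset into the interval

reset_steps = [t for t in range(1, 17)
               if t % reset_interval == reset_offset]
print(reset_steps)                        # [3, 7, 11, 15]: u and v are zeroed at these steps

# In the bit-accurate model the threshold is also MSB-aligned before comparison
# (per the scale_threshold docstring, the shift is 6 bits):
vth = 100
effective_vth = vth << 6                  # 6400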
95 changes: 93 additions & 2 deletions tests/lava/proc/lif/test_models.py
@@ -12,10 +12,11 @@
from lava.magma.core.process.process import AbstractProcess
from lava.magma.core.process.variable import Var
from lava.magma.core.resources import CPU
-from lava.magma.core.run_configs import RunConfig
+from lava.magma.core.run_configs import Loihi1SimCfg, Loihi2SimCfg, RunConfig
from lava.magma.core.run_conditions import RunSteps
from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol
-from lava.proc.lif.process import LIF, TernaryLIF
+from lava.proc.lif.process import LIF, LIFReset, TernaryLIF
+from lava.proc import io


class LifRunConfig(RunConfig):
@@ -732,3 +733,93 @@ def test_fixed_pm_neg_impulse_dv(self):
lif_v_float = np.right_shift(np.array(lif_v), 6)
self.assertListEqual(expected_v_timeseries, lif_v)
self.assertListEqual(expected_float_v, lif_v_float.tolist())


class TestLIFReset(unittest.TestCase):
"""Test LIF reset process models"""

def test_float_model(self):
"""Test float model"""
num_neurons = 10
num_steps = 16
reset_interval = 4
reset_offset = 3

lif_reset = LIFReset(shape=(num_neurons,),
u=np.arange(num_neurons),
du=0,
dv=0,
vth=100,
bias_mant=np.arange(num_neurons) + 1,
reset_interval=reset_interval,
reset_offset=reset_offset)

u_logger = io.sink.Read(buffer=num_steps)
v_logger = io.sink.Read(buffer=num_steps)

u_logger.connect_var(lif_reset.u)
v_logger.connect_var(lif_reset.v)

lif_reset.run(condition=RunSteps(num_steps),
run_cfg=Loihi2SimCfg())
u = u_logger.data.get()
v = v_logger.data.get()
lif_reset.stop()

# Lava timesteps start from t=0. So the first reset offset is missed.
u_gt_pre = np.vstack([np.arange(num_neurons)] * 2).T
u_gt_post = np.zeros((num_neurons, num_steps - reset_offset + 1))

dt = (1 + np.arange(reset_offset - 1)).reshape(1, -1)
v_gt_pre = np.arange(num_neurons).reshape(-1, 1) * dt \
+ (1 + np.arange(num_neurons)).reshape(-1, 1) * dt
dt = (1 + np.arange(num_steps - reset_offset + 1) % 4).reshape(1, -1)
v_gt_post = (1 + np.arange(num_neurons)).reshape(-1, 1) * dt

self.assertTrue(np.array_equal(u[:, :reset_offset - 1], u_gt_pre))
self.assertTrue(np.array_equal(u[:, reset_offset - 1:], u_gt_post))
self.assertTrue(np.array_equal(v[:, :reset_offset - 1], v_gt_pre))
self.assertTrue(np.array_equal(v[:, reset_offset - 1:], v_gt_post))

def test_fixed_model(self):
"""Test fixed model"""
num_neurons = 10
num_steps = 16
reset_interval = 4
reset_offset = 3

lif_reset = LIFReset(shape=(num_neurons,),
u=np.arange(num_neurons),
du=-1,
dv=0,
vth=100,
bias_mant=np.arange(num_neurons) + 1,
reset_interval=reset_interval,
reset_offset=reset_offset)

u_logger = io.sink.Read(buffer=num_steps)
v_logger = io.sink.Read(buffer=num_steps)

u_logger.connect_var(lif_reset.u)
v_logger.connect_var(lif_reset.v)

lif_reset.run(condition=RunSteps(num_steps),
run_cfg=Loihi2SimCfg(select_tag='fixed_pt'))
u = u_logger.data.get()
v = v_logger.data.get()
lif_reset.stop()

# Lava timesteps start from t=0. So the first reset offset is missed.
u_gt_pre = np.vstack([np.arange(num_neurons)] * 2).T
u_gt_post = np.zeros((num_neurons, num_steps - reset_offset + 1))

dt = (1 + np.arange(reset_offset - 1)).reshape(1, -1)
v_gt_pre = np.arange(num_neurons).reshape(-1, 1) * dt \
+ (1 + np.arange(num_neurons)).reshape(-1, 1) * dt
dt = (1 + np.arange(num_steps - reset_offset + 1) % 4).reshape(1, -1)
v_gt_post = (1 + np.arange(num_neurons)).reshape(-1, 1) * dt

self.assertTrue(np.array_equal(u[:, :reset_offset - 1], u_gt_pre))
self.assertTrue(np.array_equal(u[:, reset_offset - 1:], u_gt_post))
self.assertTrue(np.array_equal(v[:, :reset_offset - 1], v_gt_pre))
self.assertTrue(np.array_equal(v[:, reset_offset - 1:], v_gt_post))
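The ground-truth arrays in both tests follow from du = dv = 0: u holds its initial value until the first reset zeroes it, and with no synaptic input v accumulates u + bias_mant on every step between resets. A standalone re-derivation of the expected voltage trace for one neuron of the float test (index 4, so u0 = 4 and bias_mant = 5):

u, v, bias = 4.0, 0.0, 5.0                # neuron 4 in the test: u0 = 4, bias_mant = 5
reset_interval, reset_offset = 4, 3
trace = []
for t in range(1, 17):                    # 16 time steps, counted from 1
    if t % reset_interval == reset_offset:
        u = v = 0.0                       # run_spk zeroes both states before the dynamics
    v = v + u + bias                      # subthreshold update with du = dv = 0
    trace.append(v)
print(trace)                              # [9.0, 18.0, 5.0, 10.0, 15.0, 20.0, 5.0, ...]
# vth = 100 is never crossed, so no spike-driven voltage resets occur here.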