Implementation of LIF reset models in CPU backend (#415)
* Update refport unittest to always wait when it writes to a port, for consistent behavior

Signed-off-by: bamsumit <bam_sumit@hotmail.com>

* Removed pyproject changes

Signed-off-by: bamsumit <bam_sumit@hotmail.com>

* Fix to convolution tests. Fixed incompatible mnist_pretrained for old Python versions.

Signed-off-by: bamsumit <bam_sumit@hotmail.com>

* Missing module parent fix

Signed-off-by: bamsumit <bam_sumit@hotmail.com>

* Added ConvVarModel

* LIF reset python model implementation

Signed-off-by: bamsumit <bam_sumit@hotmail.com>

* Update ci-build.yml

Try to fix Poetry issues

Signed-off-by: bamsumit <bam_sumit@hotmail.com>
Co-authored-by: Joyesh Mishra <joyesh.mishra@intel.com>
Co-authored-by: PhilippPlank <32519998+PhilippPlank@users.noreply.github.com>
3 people committed Oct 14, 2022
1 parent 482bb0a commit 51b3334
Showing 4 changed files with 226 additions and 80 deletions.
96 changes: 21 additions & 75 deletions .github/workflows/ci-build.yml
@@ -10,45 +10,26 @@ jobs:
     - uses: actions/checkout@v2
       with:
         lfs: true
 
+    - name: Install Poetry
+      run: pipx install poetry
+
     - name: Set up Python 3.9
-      uses: actions/setup-python@v2
+      uses: actions/setup-python@v4
       with:
         python-version: 3.9
-
-    - name: Install Poetry
-      uses: snok/install-poetry@v1
-      with:
-        version: 1.2.1
-        virtualenvs-path: .venv
-        virtualenvs-create: true
-        virtualenvs-in-project: true
-
-    - name: Load cached venv
-      id: cached-poetry-dependencies
-      uses: actions/cache@v2
-      with:
-        path: ~/.cache
-        key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-01-${{ hashFiles('**/poetry.lock') }}
-
-    - name: Install dependencies
-      if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
-      run: poetry install --no-interaction --no-root
+        cache: 'poetry'
 
     - name: Install Lava
       run: poetry install --no-interaction
 
     - name: Run flakeheaven (flake8)
       if: runner.os == 'Linux' || runner.os == 'macOS'
-      run: |
-        source $VENV
-        flakeheaven lint src/lava tests/
+      run: poetry run flakeheaven lint src/lava tests/
 
     - name: Run flakeheaven (flake8)
       if: runner.os == 'Windows'
-      run: |
-        .venv\Scripts\activate.ps1
-        flakeheaven lint src/lava tests/
+      run: poetry run flakeheaven lint src/lava tests/
 
   security-lint:
     name: Security Lint Code
@@ -58,30 +39,15 @@ jobs:
     - uses: actions/checkout@v2
       with:
         lfs: true
 
+    - name: Install Poetry
+      run: pipx install poetry
+
     - name: Set up Python 3.9
-      uses: actions/setup-python@v2
+      uses: actions/setup-python@v4
       with:
         python-version: 3.9
-
-    - name: Install Poetry
-      uses: snok/install-poetry@v1
-      with:
-        version: 1.2.1
-        virtualenvs-path: .venv
-        virtualenvs-create: true
-        virtualenvs-in-project: true
-
-    - name: Load cached venv
-      id: cached-poetry-dependencies
-      uses: actions/cache@v2
-      with:
-        path: ~/.cache
-        key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-01-${{ hashFiles('**/poetry.lock') }}
-
-    - name: Install dependencies
-      if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
-      run: poetry install --no-interaction --no-root
+        cache: 'poetry'
 
     - name: Install Lava
       run: poetry install --no-interaction
@@ -104,46 +70,26 @@ jobs:
     - uses: actions/checkout@v2
       with:
         lfs: true
 
+    - name: Install Poetry
+      run: pipx install poetry
+
     - name: Set up Python 3.9
-      uses: actions/setup-python@v2
+      uses: actions/setup-python@v4
       with:
         python-version: 3.9
-
-    - name: Install Poetry
-      uses: snok/install-poetry@v1
-      with:
-        version: 1.2.1
-        virtualenvs-path: .venv
-        virtualenvs-create: true
-        virtualenvs-in-project: true
-
-    - name: Load cached venv
-      id: cached-poetry-dependencies
-      uses: actions/cache@v2
-      with:
-        path: ~/.cache
-        key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-01-${{ hashFiles('**/poetry.lock') }}
-
-    - name: Install dependencies
-      if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
-      run: poetry install --no-interaction --no-root
+        cache: 'poetry'
 
     - name: Install Lava
       run: poetry install --no-interaction
 
     - name: Run unit tests
       if: runner.os == 'Linux' || runner.os == 'macOS'
       run: |
-        git lfs fetch
-        git lfs pull
-        source $VENV
-        python3 -m pip install ipykernel
-        pytest
+        poetry run git lfs fetch
+        poetry run git lfs pull
+        poetry run pytest
     - name: Run unit tests
       if: runner.os == 'Windows'
-      run: |
-        .venv\Scripts\activate.ps1
-        python3 -m pip install ipykernel
-        pytest
+      run: poetry run pytest
6 changes: 4 additions & 2 deletions src/lava/proc/io/encoder.py
@@ -129,7 +129,8 @@ def __init__(self, proc_params: Optional[Dict] = None):
 
     def run_spk(self):
         self.s_out.send(self.s_out_buf)
-        a_in_data = np.left_shift(self.a_in.recv(), self.spike_exp)
+        a_in_data = np.left_shift(self.a_in.recv().astype(int),
+                                  self.spike_exp)
         self.s_out_buf = self.encode_delta(a_in_data)


@@ -235,7 +236,8 @@ def encode_delta_sparse_8(self, s_out):
     def run_spk(self):
         self.s_out.send(self.data, self.idx)
         # Receive synaptic input
-        a_in_data = np.left_shift(self.a_in.recv(), self.spike_exp)
+        a_in_data = np.left_shift(self.a_in.recv().astype(int),
+                                  self.spike_exp)
         s_out = self.encode_delta(a_in_data)
         if self.compression == Compression.SPARSE:
             self.data, self.idx = self.encode_sparse(s_out)
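Note on the change above: NumPy's bitwise-shift ufuncs only accept integer (or boolean) operands, so if the activations received on a_in arrive as a float array, np.left_shift raises a TypeError; casting with .astype(int) first avoids that. A standalone NumPy illustration of the failure mode (not Lava-specific; the values are made up):

    import numpy as np

    a = np.array([1.0, 2.0, 3.0])               # float payload, as a_in.recv() may deliver
    # np.left_shift(a, 6)                       # TypeError: ufunc 'left_shift' not supported
    shifted = np.left_shift(a.astype(int), 6)   # cast first, as the diff above does
    print(shifted)                              # [ 64 128 192]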
109 changes: 108 additions & 1 deletion src/lava/proc/lif/models.py
@@ -8,7 +8,7 @@
 from lava.magma.core.resources import CPU
 from lava.magma.core.decorator import implements, requires, tag
 from lava.magma.core.model.py.model import PyLoihiProcessModel
-from lava.proc.lif.process import LIF, TernaryLIF
+from lava.proc.lif.process import LIF, LIFReset, TernaryLIF
 
 
 class AbstractPyLifModelFloat(PyLoihiProcessModel):
@@ -320,3 +320,110 @@ def reset_voltage(self, spike_vector: np.ndarray):
         """Reset voltage of all spiking neurons to 0.
         """
         self.v[spike_vector != 0] = 0  # Reset voltage to 0 wherever we spiked
+
+
+@implements(proc=LIFReset, protocol=LoihiProtocol)
+@requires(CPU)
+@tag('floating_pt')
+class PyLifResetModelFloat(AbstractPyLifModelFloat):
+    """Implementation of Leaky-Integrate-and-Fire neural process with reset
+    in floating point precision.
+    """
+    s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float)
+    vth: float = LavaPyType(float, float)
+
+    def __init__(self, proc_params):
+        super(PyLifResetModelFloat, self).__init__(proc_params)
+        self.reset_interval = proc_params['reset_interval']
+        self.reset_offset = (proc_params['reset_offset']) % self.reset_interval
+
+    def spiking_activation(self):
+        """Spiking activation function for LIF.
+        """
+        return self.v > self.vth
+
+    def run_spk(self):
+        """The run function that performs the actual computation during
+        execution orchestrated by a PyLoihiProcessModel using the
+        LoihiProtocol.
+        """
+        # Receive synaptic input
+        a_in_data = self.a_in.recv()
+
+        if (self.time_step % self.reset_interval) == self.reset_offset:
+            self.u *= 0
+            self.v *= 0
+
+        self.subthr_dynamics(activation_in=a_in_data)
+
+        s_out = self.spiking_activation()
+
+        # Reset voltage of spiked neurons to 0
+        self.reset_voltage(spike_vector=s_out)
+        self.s_out.send(s_out)
+
+
+@implements(proc=LIFReset, protocol=LoihiProtocol)
+@requires(CPU)
+@tag('bit_accurate_loihi', 'fixed_pt')
+class PyLifResetModelBitAcc(AbstractPyLifModelFixed):
+    """Implementation of Leaky-Integrate-and-Fire neural process with reset,
+    bit-accurate with Loihi's hardware LIF dynamics, which means it mimics
+    Loihi behaviour.
+
+    Precisions of state variables
+
+    - du: unsigned 12-bit integer (0 to 4095)
+    - dv: unsigned 12-bit integer (0 to 4095)
+    - bias_mant: signed 13-bit integer (-4096 to 4095). Mantissa part of neuron
+      bias.
+    - bias_exp: unsigned 3-bit integer (0 to 7). Exponent part of neuron bias.
+    - vth: unsigned 17-bit integer (0 to 131071).
+    """
+    s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32, precision=24)
+    vth: int = LavaPyType(int, np.int32, precision=17)
+
+    def __init__(self, proc_params):
+        super(PyLifResetModelBitAcc, self).__init__(proc_params)
+        self.effective_vth = 0
+        self.reset_interval = proc_params['reset_interval']
+        self.reset_offset = (proc_params['reset_offset']) % self.reset_interval
+
+    def scale_threshold(self):
+        """Scale threshold according to the way Loihi hardware scales it. In
+        Loihi hardware, threshold is left-shifted by 6-bits to MSB-align it
+        with other state variables of higher precision.
+        """
+        self.effective_vth = np.left_shift(self.vth, self.vth_shift)
+        self.isthrscaled = True
+
+    def spiking_activation(self):
+        """Spike when voltage exceeds threshold.
+        """
+        return self.v > self.effective_vth
+
+    def run_spk(self):
+        """The run function that performs the actual computation during
+        execution orchestrated by a PyLoihiProcessModel using the
+        LoihiProtocol.
+        """
+        # Receive synaptic input
+        a_in_data = self.a_in.recv()
+
+        if (self.time_step % self.reset_interval) == self.reset_offset:
+            self.u *= 0
+            self.v *= 0
+
+        self.scale_bias()
+
+        if not self.isthrscaled:
+            self.scale_threshold()
+
+        self.subthr_dynamics(activation_in=a_in_data)
+
+        s_out = self.spiking_activation()
+
+        # Reset voltage of spiked neurons to 0
+        self.reset_voltage(spike_vector=s_out)
+        self.s_out.send(s_out)
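Both new process models gate the reset on (time_step % reset_interval) == reset_offset, with reset_offset already reduced modulo reset_interval in __init__, so u and v are zeroed once per interval at a fixed phase before the subthreshold update runs. A standalone sketch of that schedule with the same values the tests below use (plain Python arithmetic, no Lava API):

    # With reset_interval=4 and reset_offset=3, the reset condition fires at
    # time steps 3, 7, 11 and 15 of a 16-step run (the same set whether steps
    # are counted from 0 or from 1 within that window).
    reset_interval = 4
    reset_offset = 3 % reset_interval   # mirrors the modulo applied in __init__
    reset_steps = [t for t in range(16) if t % reset_interval == reset_offset]
    print(reset_steps)                  # [3, 7, 11, 15]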
95 changes: 93 additions & 2 deletions tests/lava/proc/lif/test_models.py
@@ -12,10 +12,11 @@
 from lava.magma.core.process.process import AbstractProcess
 from lava.magma.core.process.variable import Var
 from lava.magma.core.resources import CPU
-from lava.magma.core.run_configs import RunConfig
+from lava.magma.core.run_configs import Loihi1SimCfg, Loihi2SimCfg, RunConfig
 from lava.magma.core.run_conditions import RunSteps
 from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol
-from lava.proc.lif.process import LIF, TernaryLIF
+from lava.proc.lif.process import LIF, LIFReset, TernaryLIF
+from lava.proc import io
 
 
 class LifRunConfig(RunConfig):
@@ -732,3 +733,93 @@ def test_fixed_pm_neg_impulse_dv(self):
         lif_v_float = np.right_shift(np.array(lif_v), 6)
         self.assertListEqual(expected_v_timeseries, lif_v)
         self.assertListEqual(expected_float_v, lif_v_float.tolist())
+
+
+class TestTLIFReset(unittest.TestCase):
+    """Test LIF reset process models"""
+
+    def test_float_model(self):
+        """Test float model"""
+        num_neurons = 10
+        num_steps = 16
+        reset_interval = 4
+        reset_offset = 3
+
+        lif_reset = LIFReset(shape=(num_neurons,),
+                             u=np.arange(num_neurons),
+                             du=0,
+                             dv=0,
+                             vth=100,
+                             bias_mant=np.arange(num_neurons) + 1,
+                             reset_interval=reset_interval,
+                             reset_offset=reset_offset)
+
+        u_logger = io.sink.Read(buffer=num_steps)
+        v_logger = io.sink.Read(buffer=num_steps)
+
+        u_logger.connect_var(lif_reset.u)
+        v_logger.connect_var(lif_reset.v)
+
+        lif_reset.run(condition=RunSteps(num_steps),
+                      run_cfg=Loihi2SimCfg())
+        u = u_logger.data.get()
+        v = v_logger.data.get()
+        lif_reset.stop()
+
+        # Lava timesteps start from t=0. So the first reset offset is missed.
+        u_gt_pre = np.vstack([np.arange(num_neurons)] * 2).T
+        u_gt_post = np.zeros((num_neurons, num_steps - reset_offset + 1))
+
+        dt = (1 + np.arange(reset_offset - 1)).reshape(1, -1)
+        v_gt_pre = np.arange(num_neurons).reshape(-1, 1) * dt \
+            + (1 + np.arange(num_neurons)).reshape(-1, 1) * dt
+        dt = (1 + np.arange(num_steps - reset_offset + 1) % 4).reshape(1, -1)
+        v_gt_post = (1 + np.arange(num_neurons)).reshape(-1, 1) * dt
+
+        self.assertTrue(np.array_equal(u[:, :reset_offset - 1], u_gt_pre))
+        self.assertTrue(np.array_equal(u[:, reset_offset - 1:], u_gt_post))
+        self.assertTrue(np.array_equal(v[:, :reset_offset - 1], v_gt_pre))
+        self.assertTrue(np.array_equal(v[:, reset_offset - 1:], v_gt_post))
+
+    def test_fixed_model(self):
+        """Test fixed model"""
+        num_neurons = 10
+        num_steps = 16
+        reset_interval = 4
+        reset_offset = 3
+
+        lif_reset = LIFReset(shape=(num_neurons,),
+                             u=np.arange(num_neurons),
+                             du=-1,
+                             dv=0,
+                             vth=100,
+                             bias_mant=np.arange(num_neurons) + 1,
+                             reset_interval=reset_interval,
+                             reset_offset=reset_offset)
+
+        u_logger = io.sink.Read(buffer=num_steps)
+        v_logger = io.sink.Read(buffer=num_steps)
+
+        u_logger.connect_var(lif_reset.u)
+        v_logger.connect_var(lif_reset.v)
+
+        lif_reset.run(condition=RunSteps(num_steps),
+                      run_cfg=Loihi2SimCfg(select_tag='fixed_pt'))
+        u = u_logger.data.get()
+        v = v_logger.data.get()
+        lif_reset.stop()
+
+        # Lava timesteps start from t=0. So the first reset offset is missed.
+        u_gt_pre = np.vstack([np.arange(num_neurons)] * 2).T
+        u_gt_post = np.zeros((num_neurons, num_steps - reset_offset + 1))
+
+        dt = (1 + np.arange(reset_offset - 1)).reshape(1, -1)
+        v_gt_pre = np.arange(num_neurons).reshape(-1, 1) * dt \
+            + (1 + np.arange(num_neurons)).reshape(-1, 1) * dt
+        dt = (1 + np.arange(num_steps - reset_offset + 1) % 4).reshape(1, -1)
+        v_gt_post = (1 + np.arange(num_neurons)).reshape(-1, 1) * dt
+
+        self.assertTrue(np.array_equal(u[:, :reset_offset - 1], u_gt_pre))
+        self.assertTrue(np.array_equal(u[:, reset_offset - 1:], u_gt_post))
+        self.assertTrue(np.array_equal(v[:, :reset_offset - 1], v_gt_pre))
+        self.assertTrue(np.array_equal(v[:, reset_offset - 1:], v_gt_post))
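Putting the pieces together, a minimal sketch of driving the new LIFReset CPU models, distilled from the tests above (parameter values are illustrative; passing select_tag='fixed_pt' to Loihi2SimCfg selects the bit-accurate model instead of the floating-point one):

    import numpy as np

    from lava.magma.core.run_conditions import RunSteps
    from lava.magma.core.run_configs import Loihi2SimCfg
    from lava.proc.lif.process import LIFReset
    from lava.proc import io

    num_neurons = 10
    num_steps = 16

    # LIF population whose u and v are zeroed every 4 steps, starting at step 3.
    lif_reset = LIFReset(shape=(num_neurons,),
                         u=np.arange(num_neurons),
                         du=0, dv=0, vth=100,
                         bias_mant=np.arange(num_neurons) + 1,
                         reset_interval=4,
                         reset_offset=3)

    # Record the voltage trace so the periodic reset is visible.
    v_logger = io.sink.Read(buffer=num_steps)
    v_logger.connect_var(lif_reset.v)

    # Loihi2SimCfg() maps LIFReset onto the floating-point model added here.
    lif_reset.run(condition=RunSteps(num_steps), run_cfg=Loihi2SimCfg())
    v = v_logger.data.get()   # shape (num_neurons, num_steps)
    lif_reset.stop()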
