Affine hdf5 export (#221) #222

Merged
15 commits merged on Sep 9, 2023
7 changes: 5 additions & 2 deletions src/lava/lib/dl/slayer/block/base.py
@@ -391,8 +391,11 @@ def delay(d):
self.delay.clamp() # clamp the delay value
handle.create_dataset('delay', data=delay(self.delay))

# for key, value in self.neuron.device_params.items():
# handle.create_dataset(f'neuron/{key}', data=value)
if self.dynamics is True:
for key, value in self.neuron.device_params.items():
if key == 'vThMant':
value = (1 << 18) - 1 # set the maximum possible threshold
handle.create_dataset(f'neuron/{key}', data=value)


class AbstractTimeDecimation(torch.nn.Module):
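For reference, (1 << 18) - 1 = 262143, the largest unsigned 18-bit value, which the change above writes as the maximum possible threshold. Below is a minimal, self-contained sketch of writing and reading back a neuron/vThMant dataset in the same layer/<idx>/neuron/<param> layout used by export_hdf5 and by the unit tests below; the file name example.net is illustrative only, not part of the PR.

import h5py
import numpy as np

V_TH_MAX = (1 << 18) - 1  # 262143, the saturated threshold written by base.py

# write a dataset in the 'layer/<idx>/neuron/<param>' layout used by export_hdf5
# ('example.net' is a hypothetical file name)
with h5py.File("example.net", "w") as hf:
    hf.create_group("layer/0").create_dataset("neuron/vThMant", data=V_TH_MAX)

# read it back the same way the dynamics=True unit test does
with h5py.File("example.net", "r") as hf:
    vThMant = np.array(hf["layer"]["0"]["neuron"]["vThMant"])

assert vThMant == V_TH_MAX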
9 changes: 8 additions & 1 deletion src/lava/lib/dl/slayer/block/cuba.py
@@ -3,6 +3,7 @@

"""CUBA-LIF layer blocks"""

import numpy as np
import torch

from . import base
@@ -68,7 +69,13 @@ def __init__(self, *args, **kwargs):
self.synapse = synapse.Dense(**self.synapse_params)
if 'pre_hook_fx' not in kwargs.keys():
self.synapse.pre_hook_fx = self.neuron.quantize_8bit
self.neuron._threshold = None
# if dynamics is True, set the threshold to a non-None sentinel value
if self.dynamics:
self.neuron._threshold = -1
else:
self.neuron._threshold = None
# set the shape according to synapse output
self.neuron.shape = torch.Size([self.synapse.out_channels])
# this disables spike and reset in dynamics
del self.synapse_params

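The effect of the change above: with dynamics=True the block keeps a sentinel threshold, so the export in base.py writes the 'neuron' group; with dynamics=False the threshold stays None and the group is skipped. A minimal sketch based on this diff (assuming a working lava-dl installation; it uses the same constructor arguments as the unit tests below and inspects the private _threshold attribute set here):

import lava.lib.dl.slayer as slayer

neuron_param = {"threshold": 0.5, "current_decay": 0.5, "voltage_decay": 0.5}

# dynamics=False keeps the threshold disabled (None),
# so export_hdf5 writes no 'neuron' group
net_plain = slayer.block.cuba.Affine(
    neuron_params=neuron_param, in_neurons=10, out_neurons=5,
    dynamics=False, count_log=False,
)
assert net_plain.neuron._threshold is None

# dynamics=True sets the -1 sentinel, so export_hdf5 writes the 'neuron' group,
# including the saturated vThMant from base.py
net_dynamics = slayer.block.cuba.Affine(
    neuron_params=neuron_param, in_neurons=10, out_neurons=5,
    dynamics=True, count_log=False,
)
assert net_dynamics.neuron._threshold == -1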
117 changes: 89 additions & 28 deletions tests/lava/lib/dl/slayer/block/test_cuba.py
@@ -15,8 +15,9 @@
from lava.proc.conv import utils
from lava.proc import io

verbose = True if (('-v' in sys.argv) or ('--verbose' in sys.argv)) else False
# Enabling torch sometimes causes multiprocessing error, especially in unittests
verbose = True if (("-v" in sys.argv) or ("--verbose" in sys.argv)) else False
# Enabling torch sometimes causes multiprocessing error,
# especially in unittests
utils.TORCH_IS_AVAILABLE = False

# seed = np.random.randint(1000)
@@ -25,27 +26,81 @@
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if verbose:
print(f'{seed=}')
print(f"{seed=}")

if torch.cuda.is_available():
device = torch.device('cuda')
device = torch.device("cuda")
else:
if verbose:
print('CUDA is not available in the system. '
'Testing for CPU version only.')
device = torch.device('cpu')
print(
"CUDA is not available in the system. "
"Testing for CPU version only."
)
device = torch.device("cpu")

tempdir = os.path.dirname(__file__) + '/temp'
tempdir = os.path.dirname(__file__) + "/temp"
os.makedirs(tempdir, exist_ok=True)

neuron_param = {'threshold': 0.5,
'current_decay': 0.5,
'voltage_decay': 0.5}
neuron_param = {"threshold": 0.5, "current_decay": 0.5, "voltage_decay": 0.5}


class TestCUBA(unittest.TestCase):
"""Test CUBA blocks"""

def test_affine_block_hdf5_export_dynamics_false(self):
"""Test affine block hdf5 export in dynamics=false mode."""
in_features = 10
out_features = 5

net = slayer.block.cuba.Affine(
neuron_params=neuron_param,
in_neurons=in_features,
out_neurons=out_features,
dynamics=False,
count_log=False,
)

# export slayer network
h = h5py.File(tempdir + "/cuba_affine_dynamics_false.net", "w")
net.export_hdf5(h.create_group("layer/0"))

# reload net from h5 and check that no 'neuron' group exists.
lava_net = netx.hdf5.Network(
net_config=tempdir + "/cuba_affine_dynamics_false.net"
)

self.assertTrue("neuron" not in lava_net.net_config["layer"][0].keys())

def test_affine_block_hdf5_export_dynamics_true(self):
"""Test affine block hdf5 export in dynamics=true mode."""
in_features = 10
out_features = 5

net = slayer.block.cuba.Affine(
neuron_params=neuron_param,
in_neurons=in_features,
out_neurons=out_features,
dynamics=True,
count_log=False,
)

# export slayer network
h = h5py.File(tempdir + "/cuba_affine_dynamics_true.net", "w")
net.export_hdf5(h.create_group("layer/0"))

# reload net from h5 and check that 'vThMant' is '(1 << 18) - 1'.
# lava_net = netx.hdf5.Network(
# net_config=tempdir + "/cuba_affine_dynamics_true.net"
# )
# layer = lava_net.layers[0]
# neuron = layer.__dict__["neuron"].__dict__

# load network file and check neuron
with h5py.File(tempdir + "/cuba_affine_dynamics_true.net", "r") as hf:
vThMant = np.array(hf["layer"]["0"]["neuron"]["vThMant"])

self.assertTrue(vThMant == (1 << 18) - 1)

def test_dense_block(self):
"""Test dense block with lava process implementation."""
in_features = 10
@@ -58,27 +113,28 @@ def test_dense_block(self):
y = net(x)

# export slayer network
net.export_hdf5(h5py.File(tempdir + '/cuba_dense.net',
'w').create_group('layer/0'))
net.export_hdf5(
h5py.File(tempdir + "/cuba_dense.net", "w").create_group("layer/0")
)

# create equivalent lava network using netx and evaluate output
lava_net = netx.hdf5.Network(net_config=tempdir + '/cuba_dense.net')
lava_net = netx.hdf5.Network(net_config=tempdir + "/cuba_dense.net")
source = io.source.RingBuffer(data=x[0])
sink = io.sink.RingBuffer(shape=lava_net.out.shape, buffer=time_steps)
source.s_out.connect(lava_net.inp)
lava_net.out.connect(sink.a_in)
run_condition = RunSteps(num_steps=time_steps)
run_config = Loihi1SimCfg(select_tag='fixed_pt')
run_config = Loihi1SimCfg(select_tag="fixed_pt")
lava_net.run(condition=run_condition, run_cfg=run_config)
output = sink.data.get()
lava_net.stop()

if verbose:
print()
print(lava_net)
print('slayer output:')
print("slayer output:")
print(y[0])
print('lava output:')
print("lava output:")
print(output)

self.assertTrue(np.abs(y[0].data.numpy() - output).sum() == 0)
@@ -93,35 +149,40 @@ def test_conv_block(self):
time_steps = 10

# create slayer network and evaluate output
net = slayer.block.cuba.Conv(neuron_param,
in_features, out_features, kernel_size)
x = (torch.rand([1, in_features,
height, width, time_steps]) > 0.5).float()
net = slayer.block.cuba.Conv(
neuron_param, in_features, out_features, kernel_size
)
x = (
torch.rand([1, in_features, height, width, time_steps]) > 0.5
).float()
y = net(x).permute((0, 3, 2, 1, 4))

# export slayer network
net.export_hdf5(h5py.File(tempdir + '/cuba_conv.net',
'w').create_group('layer/0'))
net.export_hdf5(
h5py.File(tempdir + "/cuba_conv.net", "w").create_group("layer/0")
)

# create equivalent lava network using netx and evaluate output
lava_net = netx.hdf5.Network(net_config=tempdir + '/cuba_conv.net',
input_shape=(width, height, in_features))
lava_net = netx.hdf5.Network(
net_config=tempdir + "/cuba_conv.net",
input_shape=(width, height, in_features),
)
source = io.source.RingBuffer(data=x[0].permute((2, 1, 0, 3)))
sink = io.sink.RingBuffer(shape=lava_net.out.shape, buffer=time_steps)
source.s_out.connect(lava_net.inp)
lava_net.out.connect(sink.a_in)
run_condition = RunSteps(num_steps=time_steps)
run_config = Loihi1SimCfg(select_tag='fixed_pt')
run_config = Loihi1SimCfg(select_tag="fixed_pt")
lava_net.run(condition=run_condition, run_cfg=run_config)
output = sink.data.get()
lava_net.stop()

if verbose:
print()
print(lava_net)
print('slayer output:')
print("slayer output:")
print(y[0][0, 0, 0])
print('lava output:')
print("lava output:")
print(output[0, 0, 0])

self.assertTrue(np.abs(y[0].data.numpy() - output).sum() == 0)