From 452e68efec13448934559285d770f14d295848cb Mon Sep 17 00:00:00 2001 From: gkarray Date: Fri, 2 Dec 2022 09:51:32 +0100 Subject: [PATCH 01/32] Adding first round of Event Data Processes --- .gitattributes | 1 + pyproject.toml | 1 + .../event_data_loader/aedat_data_loader.py | 137 ++++++++ .../dense_to_dense/down_sampling_dense.py | 194 ++++++++++ .../dense_to_dense/flattening.py | 49 +++ .../sparse_to_dense/sparse_to_dense.py | 121 +++++++ .../binary_to_unary_polarity.py | 66 ++++ .../event_data/event_pre_processor/utils.py | 24 ++ tests/lava/proc/event_data/__init__.py | 0 .../lava/proc/event_data/dvs_recording.aedat4 | 3 + .../test_aedat_data_loader.py | 295 ++++++++++++++++ .../test_down_sampling_dense.py | 330 ++++++++++++++++++ .../dense_to_dense/test_flattening.py | 20 ++ .../sparse_to_dense/test_sparse_to_dense.py | 295 ++++++++++++++++ .../test_binary_to_unary_polarity.py | 174 +++++++++ .../lava/proc/event_data/test_integration.py | 161 +++++++++ 16 files changed, 1871 insertions(+) create mode 100644 src/lava/proc/event_data/event_data_loader/aedat_data_loader.py create mode 100644 src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py create mode 100644 src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py create mode 100644 src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py create mode 100644 src/lava/proc/event_data/event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py create mode 100644 src/lava/proc/event_data/event_pre_processor/utils.py create mode 100644 tests/lava/proc/event_data/__init__.py create mode 100644 tests/lava/proc/event_data/dvs_recording.aedat4 create mode 100644 tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py create mode 100644 tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_down_sampling_dense.py create mode 100644 tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py create mode 100644 tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py create mode 100644 tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py create mode 100644 tests/lava/proc/event_data/test_integration.py diff --git a/.gitattributes b/.gitattributes index 39c922b93..6e5510e3a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1,2 @@ *.npy* filter=lfs diff=lfs merge=lfs -text +*.aedat4 filter=lfs diff=lfs merge=lfs -text diff --git a/pyproject.toml b/pyproject.toml index 055482a3d..854ea2706 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,6 +51,7 @@ numpy = "^1.22.2" scipy = "^1.8.0" networkx = "<=2.8.7" asteval = "^0.9.27" +dv = "^1.0.10" [tool.poetry.dev-dependencies] bandit = "1.7.2" diff --git a/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py b/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py new file mode 100644 index 000000000..fec88f025 --- /dev/null +++ b/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py @@ -0,0 +1,137 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import typing as ty + +from dv import AedatFile +import numpy as np +import random +from operator import itemgetter +import os + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import OutPort + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports 
import PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + + +class AedatDataLoader(AbstractProcess): + def __init__(self, + file_path: str, + shape_out: tuple, + seed_sub_sampling: int = None, + **kwargs) -> None: + super().__init__(file_path=file_path, + shape_out=shape_out, + seed_sub_sampling=seed_sub_sampling, + **kwargs) + + self._validate_file_path(file_path) + self._validate_shape_out(shape_out) + + self.out_port = OutPort(shape=shape_out) + + @staticmethod + def _validate_file_path(file_path): + # Checking file extension + if not file_path[-7:] == ".aedat4": + raise ValueError(f"Given file should be an .aedat4 file. " + f"{file_path} given.") + + try: + # Checking file size + if os.stat(file_path).st_size > 0: + return file_path + except FileNotFoundError: + # Checking file exists + raise FileNotFoundError(f"File not found. {file_path} given.") + + return file_path + + @staticmethod + def _validate_shape_out(shape_out): + if not isinstance(shape_out[0], int): + raise ValueError(f"Max number of events should be an integer." + f"{shape_out} given.") + + if shape_out[0] <= 0: + raise ValueError(f"Max number of events should be positive. " + f"{shape_out} given.") + + if len(shape_out) != 1: + raise ValueError(f"Shape of the OutPort should be 1D. " + f"{shape_out} given.") + + return shape_out + + +@implements(proc=AedatDataLoader, protocol=LoihiProtocol) +@requires(CPU) +class AedatDataLoaderPM(PyLoihiProcessModel): + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) + + def __init__(self, proc_params: dict) -> None: + super().__init__(proc_params) + self._file_path = proc_params["file_path"] + self._shape_out = proc_params["shape_out"] + + self._init_aedat_file() + self._frame_shape = (self._file["events"].size_x, + self._file["events"].size_y) + + seed_sub_sampling = proc_params["seed_sub_sampling"] + self._random_rng = np.random.default_rng(seed_sub_sampling) + + def _init_aedat_file(self) -> None: + self._file = AedatFile(file_name=self._file_path) + self._stream = self._file["events"].numpy() + + def run_spk(self) -> None: + events = self._get_next_event_batch() + + xs, ys, ps = events['x'], events['y'], events['polarity'] + + data, indices = self._encode_data_and_indices(xs, ys, ps) + + if data.shape[0] > self._shape_out[0]: + # If we have more data than our shape allows, subsample + data, indices = self._sub_sample(data, indices) + + self.out_port.send(data, indices) + + def _get_next_event_batch(self): + try: + events = self._stream.__next__() + except StopIteration: + self._init_aedat_file() + events = self._stream.__next__() + + return events + + def _encode_data_and_indices(self, + xs: np.ndarray, + ys: np.ndarray, + ps: np.ndarray) \ + -> ty.Tuple[np.ndarray, np.ndarray]: + data = ps + indices = np.ravel_multi_index((xs, ys), self._frame_shape) + + return data, indices + + def _sub_sample(self, + data: np.ndarray, + indices: np.ndarray) \ + -> ty.Tuple[np.ndarray, np.ndarray]: + # TODO: print a warning if subsampling, say how much data has been lost + data_idx_array = np.arange(0, data.shape[0]) + sampled_idx = self._random_rng.choice(data_idx_array, + self._shape_out[0], + replace=False) + + return data[sampled_idx], indices[sampled_idx] diff --git a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py 
b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py new file mode 100644 index 000000000..64925ec4a --- /dev/null +++ b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py @@ -0,0 +1,194 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import typing as ty + +import numpy as np +from scipy import signal +from numpy.lib.stride_tricks import as_strided + +from lava.proc.event_data.event_pre_processor.utils import \ + DownSamplingMethodDense + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + + +class DownSamplingDense(AbstractProcess): + def __init__(self, + shape_in: tuple, + down_sampling_method: DownSamplingMethodDense, + down_sampling_factor: int, + **kwargs) -> None: + super().__init__(shape_in=shape_in, + down_sampling_method=down_sampling_method, + down_sampling_factor=down_sampling_factor, + **kwargs) + + self._validate_shape_in(shape_in) + self._validate_down_sampling_method(down_sampling_method) + self._validate_down_sampling_factor(down_sampling_factor) + # test invalid shape in (negative/decimal values, 1d, 4+d, 3rd dim not 2) + # test for invalid down sampling factor (negative values) + # test for invalid type given to down sampling method + + shape_out = (shape_in[0] // down_sampling_factor, + shape_in[1] // down_sampling_factor) + self.in_port = InPort(shape=shape_in) + self.out_port = OutPort(shape=shape_out) + + @staticmethod + def _validate_shape_in(shape_in): + if not (len(shape_in) == 2 or len(shape_in) == 3): + raise ValueError(f"shape_in should be 2 or 3 dimensional. " + f"{shape_in} given.") + + if not isinstance(shape_in[0], int) or not isinstance(shape_in[1], int): + raise ValueError(f"Width and height of shape_in should be integers." + f"{shape_in} given.") + if len(shape_in) == 3: + if shape_in[2] != 2: + raise ValueError(f"Third dimension of shape_in should be " + f"equal to 2. " + f"{shape_in} given.") + + if shape_in[0] <= 0 or shape_in[1] <= 0: + raise ValueError(f"Width and height of shape_in should be positive." + f"{shape_in} given.") + + return shape_in + + @staticmethod + def _validate_down_sampling_method(down_sampling_method): + if not isinstance(down_sampling_method, DownSamplingMethodDense): + raise (TypeError( + f"Down sampling methods for dense to dense down-sampling need to be " + f"selected using the DownSamplingMethodDense Enum.")) + # TODO: mention that it's an enum in error message? + + @staticmethod + def _validate_down_sampling_factor(down_sampling_factor): + # TODO: should the down sampling factor be a float or an int? + if not isinstance(down_sampling_factor, int): + raise (ValueError(f"Down sampling factor should be an integer." + f"{down_sampling_factor} given.")) + + if down_sampling_factor <= 0: + raise ValueError(f"Down sampling factor should be positive." 
+ f"{down_sampling_factor} given.") + + +@implements(proc=DownSamplingDense, protocol=LoihiProtocol) +@requires(CPU) +class DownSamplingDensePM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + + def __init__(self, proc_params: dict) -> None: + super().__init__(proc_params) + self._shape_in = proc_params["shape_in"] + self._down_sampling_method = proc_params["down_sampling_method"] + self._down_sampling_factor = proc_params["down_sampling_factor"] + + self._shape_out = (self._shape_in[0] // self._down_sampling_factor, + self._shape_in[1] // self._down_sampling_factor) + + def run_spk(self) -> None: + data = self.in_port.recv() + + down_sampled_data = self._down_sample(data) + + self.out_port.send(down_sampled_data) + + def _down_sample(self, data: np.ndarray) -> np.ndarray: + if self._down_sampling_method == DownSamplingMethodDense.SKIPPING: + down_sampled_data = \ + self._down_sample_skipping(data, + self._down_sampling_factor, + self._shape_out[0], + self._shape_out[1]) + + elif self._down_sampling_method == DownSamplingMethodDense.MAX_POOLING: + down_sampled_data = \ + self._down_sample_max_pooling(data, + self._down_sampling_factor, + self._shape_out[0], + self._shape_out[1]) + + elif self._down_sampling_method == DownSamplingMethodDense.CONVOLUTION: + down_sampled_data = \ + self._down_sample_convolution(data, + self._down_sampling_factor, + self._shape_out[0], + self._shape_out[1]) + + else: + # TODO : Remove since validation is taking care of this ? + raise ValueError(f"Unknown down_sample_mode." + f"{self._down_sampling_method=} given.") + + return down_sampled_data + + @staticmethod + def _down_sample_skipping(data: np.ndarray, + down_sampling_factor: int, + down_sampled_width: int, + down_sampled_height: int) -> np.ndarray: + down_sampled_data = \ + data[::down_sampling_factor, ::down_sampling_factor] + + down_sampled_data = \ + down_sampled_data[:down_sampled_width, :down_sampled_height] + + return down_sampled_data + + @staticmethod + def _down_sample_max_pooling(data: np.ndarray, + down_sampling_factor: int, + down_sampled_width: int, + down_sampled_height: int) -> np.ndarray: + output_shape = \ + ((data.shape[0] - down_sampling_factor) // down_sampling_factor + 1, + (data.shape[1] - down_sampling_factor) // down_sampling_factor + 1) + + shape_w = (output_shape[0], + output_shape[1], + down_sampling_factor, + down_sampling_factor) + strides_w = (down_sampling_factor * data.strides[0], + down_sampling_factor * data.strides[1], + data.strides[0], + data.strides[1]) + + down_sampled_data = as_strided(data, shape_w, strides_w) + down_sampled_data = down_sampled_data.max(axis=(2, 3)) + + # TODO: Is this really needed ? 
+ down_sampled_data = \ + down_sampled_data[:down_sampled_width, :down_sampled_height] + + return down_sampled_data + + @staticmethod + def _down_sample_convolution(data: np.ndarray, + down_sampling_factor: int, + down_sampled_width: int, + down_sampled_height: int) -> np.ndarray: + kernel = np.ones((down_sampling_factor, down_sampling_factor)) + data_convolved = signal.convolve2d(data, kernel) + + down_sampled_data = \ + data_convolved[::down_sampling_factor, ::down_sampling_factor] + + down_sampled_data = \ + down_sampled_data[:down_sampled_width, :down_sampled_height] + + return down_sampled_data diff --git a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py new file mode 100644 index 000000000..9409e3134 --- /dev/null +++ b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py @@ -0,0 +1,49 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import typing as ty + +import numpy as np + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + + +class Flattening(AbstractProcess): + def __init__(self, + shape_in: tuple, + shape_out: tuple, + **kwargs) -> None: + super().__init__(shape_in=shape_in, + shape_out=shape_out, + **kwargs) + + raise NotImplementedError() + + # TODO: Validation + + self.in_port = InPort() + self.out_port = OutPort() + + +@implements(proc=Flattening, protocol=LoihiProtocol) +@requires(CPU) +class FlatteningPM(PyLoihiProcessModel): + in_port: PyOutPort = LavaPyType(PyInPort.VEC_DENSE, int) + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + + def __init__(self, proc_params: dict) -> None: + super().__init__(proc_params) + raise NotImplementedError() + + def run_spk(self) -> None: + raise NotImplementedError() + diff --git a/src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py b/src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py new file mode 100644 index 000000000..e11c8bc68 --- /dev/null +++ b/src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py @@ -0,0 +1,121 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import typing as ty + +import numpy as np + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + + +class SparseToDense(AbstractProcess): + def __init__(self, + shape_in: tuple, + shape_out: tuple, + **kwargs) -> None: + super().__init__(shape_in=shape_in, + shape_out=shape_out, + **kwargs) + + self._validate_shape_in(shape_in) + 
self._validate_shape_out(shape_out) + + self.in_port = InPort(shape=shape_in) + self.out_port = OutPort(shape=shape_out) + + @staticmethod + def _validate_shape_in(shape_in): + if not isinstance(shape_in[0], int): + raise ValueError(f"Width of shape_in should be an integer. " + f"{shape_in} given.") + + if shape_in[0] <= 0: + raise ValueError(f"Width of shape_in should be positive. {shape_in} given.") + + if len(shape_in) != 1: + raise ValueError(f"shape_in should be 1 dimensional. {shape_in} given.") + + return shape_in + # test 2d instantiation ok + # test 3d instantiation ok + # test what happens when wanting a non 2-3D out shape + # non 1D in shape + # 3rd dimension not 2 in 3D case + # invalid shapes (decimal, negative) + + @staticmethod + def _validate_shape_out(shape_out): + if not (len(shape_out) == 2 or len(shape_out) == 3): + raise ValueError(f"shape out should be 2 or 3 dimensional. {shape_out} given.") + + if not isinstance(shape_out[0], int) or not isinstance(shape_out[1], int): + raise ValueError(f"Width and height of the out shape should be integers. " + f"{shape_out} given.") + if len(shape_out) == 3: + if shape_out[2] != 2: + raise ValueError(f"Depth of the out shape should be an integer and equal to 2. " + f"{shape_out} given.") + + if shape_out[0] <= 0 or shape_out[1] <= 0: + raise ValueError(f"Width and height of the out shape should be positive. {shape_out} given.") + + return shape_out + + +@implements(proc=SparseToDense, protocol=LoihiProtocol) +@requires(CPU) +class SparseToDensePM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + + def __init__(self, proc_params: dict) -> None: + super().__init__(proc_params) + self._shape_out = proc_params["shape_out"] + + def run_spk(self) -> None: + data, indices = self.in_port.recv() + + dense_data = self._transform(data, indices) + + self.out_port.send(dense_data) + + def _transform(self, data: np.ndarray, indices: np.ndarray) -> np.ndarray: + if len(self._shape_out) == 2: + return self._transform_2d(data, indices) + elif len(self._shape_out) == 3: + return self._transform_3d(data, indices) + # TODO : Should we add an else here ? 
+ # TODO : We will never reach it if correctly validated + + def _transform_2d(self, + data: np.ndarray, + indices: np.ndarray) -> np.ndarray: + dense_data = np.zeros(self._shape_out) + + xs, ys = np.unravel_index(indices, self._shape_out) + + dense_data[xs[data == 0], ys[data == 0]] = 1 + dense_data[xs[data == 1], ys[data == 1]] = 1 + + return dense_data + + def _transform_3d(self, + data: np.ndarray, + indices: np.ndarray) -> np.ndarray: + dense_data = np.zeros(self._shape_out) + + xs, ys = np.unravel_index(indices, self._shape_out[:-1]) + + dense_data[xs[data == 0], ys[data == 0], 0] = 1 + dense_data[xs[data == 1], ys[data == 1], 1] = 1 + + return dense_data diff --git a/src/lava/proc/event_data/event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py b/src/lava/proc/event_data/event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py new file mode 100644 index 000000000..6013c99f1 --- /dev/null +++ b/src/lava/proc/event_data/event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py @@ -0,0 +1,66 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import typing as ty + +import numpy as np + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + + +class BinaryToUnaryPolarity(AbstractProcess): + def __init__(self, + shape: tuple, + **kwargs) -> None: + super().__init__(shape=shape, **kwargs) + + self._validate_shape(shape) + + self.in_port = InPort(shape=shape) + self.out_port = OutPort(shape=shape) + + @staticmethod + def _validate_shape(shape): + if not isinstance(shape[0], int): + raise ValueError(f"Max number of events should be an integer." + f"{shape} given.") + + if shape[0] <= 0: + raise ValueError(f"Max number of events should be positive. " + f"{shape} given.") + + if len(shape) != 1: + raise ValueError(f"Shape of the OutPort should be 1D. 
" + f"{shape} given.") + + return shape + + +@implements(proc=BinaryToUnaryPolarity, protocol=LoihiProtocol) +@requires(CPU) +class BinaryToUnaryPolarityPM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) + + def run_spk(self) -> None: + data, indices = self.in_port.recv() + + data = self._encode(data) + + self.out_port.send(data, indices) + + @staticmethod + def _encode(data: np.ndarray) -> np.ndarray: + data[data == 0] = 1 + + return data + diff --git a/src/lava/proc/event_data/event_pre_processor/utils.py b/src/lava/proc/event_data/event_pre_processor/utils.py new file mode 100644 index 000000000..99c28e8f4 --- /dev/null +++ b/src/lava/proc/event_data/event_pre_processor/utils.py @@ -0,0 +1,24 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +from enum import IntEnum + + +class DownSamplingMethodSparse(IntEnum): + SKIPPING = 0 + MAX_POOLING = 1 + + +class UpSamplingMethodSparse(IntEnum): + REPEAT = 0 + + +class DownSamplingMethodDense(IntEnum): + SKIPPING = 0 + MAX_POOLING = 1 + CONVOLUTION = 2 + + +class UpSamplingMethodDense(IntEnum): + REPEAT = 0 diff --git a/tests/lava/proc/event_data/__init__.py b/tests/lava/proc/event_data/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/lava/proc/event_data/dvs_recording.aedat4 b/tests/lava/proc/event_data/dvs_recording.aedat4 new file mode 100644 index 000000000..49d8cba27 --- /dev/null +++ b/tests/lava/proc/event_data/dvs_recording.aedat4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:431495ecbd8d6b15d7c7fad3474d3c8328e5fd3e87e24d56cc1f66d1653ecfc6 +size 2405 diff --git a/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py b/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py new file mode 100644 index 000000000..9be8c4650 --- /dev/null +++ b/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py @@ -0,0 +1,295 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ +import unittest +from lava.proc.event_data.event_data_loader.aedat_data_loader import AedatDataLoader, \ + AedatDataLoaderPM +from dv import AedatFile +from dv.AedatFile import _AedatFileEventNumpyPacketIterator + +import typing as ty +import numpy as np + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.run_configs import Loihi1SimCfg + + +class RecvSparse(AbstractProcess): + def __init__(self, + shape: tuple) -> None: + super().__init__(shape=shape) + + self.in_port = InPort(shape=shape) + + self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) + self.idx = Var(shape=shape, init=np.zeros(shape, dtype=int)) + + +@implements(proc=RecvSparse, protocol=LoihiProtocol) +@requires(CPU) +class PyRecvSparsePM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) + + 
data: np.ndarray = LavaPyType(np.ndarray, int) + idx: np.ndarray = LavaPyType(np.ndarray, int) + + def run_spk(self) -> None: + data, idx = self.in_port.recv() + + self.data = np.pad(data, + pad_width=(0, self.in_port.shape[0] - data.shape[0])) + self.idx = np.pad(idx, + pad_width=(0, self.in_port.shape[0] - data.shape[0])) + + +class TestProcessAedatDataLoader(unittest.TestCase): + def test_init(self): + """Tests instantiation of AedatDataLoader""" + data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(43200,)) + + self.assertIsInstance(data_loader, AedatDataLoader) + self.assertEqual(data_loader.proc_params["file_path"], + "../dvs_recording.aedat4") + self.assertEqual(data_loader.proc_params["shape_out"], (43200,)) + + def test_invalid_file_path_extension(self): + """Tests for invalid file extension""" + with(self.assertRaises(ValueError)): + # Test for .py + _ = AedatDataLoader(file_path="test_aedat_data_loader.py", + shape_out=(43200,)) + + def test_invalid_file_path_missing_file(self): + """Tests for missing file""" + with(self.assertRaises(FileNotFoundError)): + _ = AedatDataLoader(file_path="missing_file.aedat4", + shape_out=(43200,)) + + def test_invalid_shape_out_dimension(self): + """Test for an invalid shape given to the shape_out param""" + with(self.assertRaises(ValueError)): + _ = AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(240, 180)) + + def test_invalid_shape_out_negative_integer(self): + """Tests for a negative width given""" + with(self.assertRaises(ValueError)): + _ = AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(-43200,)) + + def test_invalid_shape_out_decimal(self): + """Tests for a decimal width given""" + with(self.assertRaises(ValueError)): + _ = AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(43200.5,)) + + +class TestProcessModelAedatDataLoader(unittest.TestCase): + def test_init(self): + proc_params = { + "file_path": "../dvs_recording.aedat4", + "shape_out": (3000,), + "seed_sub_sampling": 0 + } + + pm = AedatDataLoaderPM(proc_params) + + self.assertIsInstance(pm, AedatDataLoaderPM) + self.assertEqual(pm._shape_out, proc_params["shape_out"]) + self.assertIsInstance(pm._file, AedatFile) + self.assertIsInstance(pm._stream, + _AedatFileEventNumpyPacketIterator) + self.assertIsInstance(pm._frame_shape, tuple) + self.assertIsInstance(pm._random_rng, np.random.Generator) + + def test_sub_sampling(self): + data_history = [ + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [0], + [1, 1, 1], + [1], + [1], + [1] + ] + indices_history = [ + [1597, 2308, 2486, 2496, 2498, 1787, 2642, 2633, 2489, + 2488, 1596, 1729, 1727, 2500, 1780], + [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], + [7138, 2301, 2471, 1601, 2982, 1364, 1379, 1386, 1384, + 2983, 1390, 2289, 1401, 1362, 2293], + [1910, 1382, 1909, 1562, 1606, 1381], + [464], + [2323, 1908, 1393], + [4062], + [1792], + [3889] + ] + seed_rng = 0 + rng = np.random.default_rng(seed=seed_rng) + + max_num_events = 10 + data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng) + recv_sparse = RecvSparse(shape=(max_num_events,)) + + data_loader.out_port.connect(recv_sparse.in_port) + + # Run parameters + num_steps = 9 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + # Running + for i in range(num_steps): + data_loader.run(condition=run_cnd, 
run_cfg=run_cfg) + + expected_data = np.array(data_history[i]) + expected_indices = np.array(indices_history[i]) + + sent_and_received_data = \ + recv_sparse.data.get()[:expected_data.shape[0]] + sent_and_received_indices = \ + recv_sparse.idx.get()[:expected_indices.shape[0]] + + if expected_data.shape[0] > max_num_events: + data_idx_array = np.arange(0, expected_data.shape[0]) + sampled_idx = rng.choice(data_idx_array, + max_num_events, + replace=False) + + expected_data = expected_data[sampled_idx] + expected_indices = expected_indices[sampled_idx] + + np.testing.assert_equal(sent_and_received_data, + expected_data) + np.testing.assert_equal(sent_and_received_indices, + expected_indices) + + # Stopping + data_loader.stop() + + def test_run(self): + data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(3000,), + seed_sub_sampling=0) + + # Run parameters + num_steps = 9 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=num_steps) + + # Running + data_loader.run(condition=run_cnd, run_cfg=run_cfg) + + # Stopping + data_loader.stop() + + self.assertFalse(data_loader.runtime._is_running) + + def test_end_of_file(self): + data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(3000,), + seed_sub_sampling=0) + + # Run parameters + num_steps = 10 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=num_steps) + + # Running + data_loader.run(condition=run_cnd, run_cfg=run_cfg) + + # Stopping + data_loader.stop() + + self.assertFalse(data_loader.runtime._is_running) + + def test_index_encoding(self): + x_history = [ + [8, 12, 13, 13, 13, 9, 14, 14, 13, 13, 8, 9, 9, 13, 9], + [8, 9, 12, 7, 12, 12, 20, 19, 10], + [39, 12, 13, 8, 16, 7, 7, 7, 7, 16, 7, 12, 7, 7, 12], + [10, 7, 10, 8, 8, 7], + [2], + [12, 10, 7], + [22], + [9], + [21] + ] + y_history = [ + [157, 148, 146, 156, 158, 167, 122, 113, 149, 148, 156, + 109, 107, 160, 160], + [160, 112, 137, 128, 130, 145, 104, 99, 111], + [118, 141, 131, 161, 102, 104, 119, 126, 124, 103, 130, + 129, 141, 102, 133], + [110, 122, 109, 122, 166, 121], + [104], + [163, 108, 133], + [102], + [172], + [109] + ] + seed_rng = 0 + rng = np.random.default_rng(seed=seed_rng) + dense_shape = (240, 180) + + max_num_events = 10 + data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng) + recv_sparse = RecvSparse(shape=(max_num_events,)) + + data_loader.out_port.connect(recv_sparse.in_port) + + # Run parameters + num_steps = 9 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + # Running + for i in range(num_steps): + data_loader.run(condition=run_cnd, run_cfg=run_cfg) + + expected_xs = np.array(x_history[i]) + expected_ys = np.array(y_history[i]) + + sent_and_received_indices = \ + recv_sparse.idx.get()[:expected_xs.shape[0]].astype(int) + + reconstructed_xs, reconstructed_ys = \ + np.unravel_index(sent_and_received_indices, dense_shape) + + if expected_xs.shape[0] > max_num_events: + data_idx_array = np.arange(0, expected_xs.shape[0]) + sampled_idx = rng.choice(data_idx_array, + max_num_events, + replace=False) + + expected_xs = expected_xs[sampled_idx] + expected_ys = expected_ys[sampled_idx] + + np.testing.assert_equal(reconstructed_xs, expected_xs) + np.testing.assert_equal(reconstructed_ys, expected_ys) + + # Stopping + data_loader.stop() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_down_sampling_dense.py 
b/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_down_sampling_dense.py new file mode 100644 index 000000000..090f477d0 --- /dev/null +++ b/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_down_sampling_dense.py @@ -0,0 +1,330 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ +import unittest +from lava.proc.event_data.event_pre_processor.dense_to_dense.down_sampling_dense import DownSamplingDense, DownSamplingDensePM +from lava.proc.event_data.event_pre_processor.utils import DownSamplingMethodDense + +import numpy as np + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.run_configs import Loihi1SimCfg + +import matplotlib.pyplot as plt + + +class RecvDense(AbstractProcess): + def __init__(self, + shape: tuple) -> None: + super().__init__(shape=shape) + + self.in_port = InPort(shape=shape) + + self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) + + +@implements(proc=RecvDense, protocol=LoihiProtocol) +@requires(CPU) +class PyRecvDensePM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + + data: np.ndarray = LavaPyType(np.ndarray, int) + + def run_spk(self) -> None: + data = self.in_port.recv() + + self.data = data + + +class SendDense(AbstractProcess): + def __init__(self, + shape: tuple, + data: np.ndarray) -> None: + super().__init__(shape=shape, data=data) + + self.out_port = OutPort(shape=shape) + + +@implements(proc=SendDense, protocol=LoihiProtocol) +@requires(CPU) +class PySendDensePM(PyLoihiProcessModel): + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + + def __init__(self, proc_params): + super().__init__(proc_params) + self._data = proc_params["data"] + + def run_spk(self) -> None: + data = self._data + + self.out_port.send(data) + + +class TestProcessDownSamplingDense(unittest.TestCase): + def test_init(self): + """Tests instantiation of DownSamplingDense.""" + down_sampler = DownSamplingDense(shape_in=(240, 180), + down_sampling_method=DownSamplingMethodDense.CONVOLUTION, + down_sampling_factor=8) + + self.assertIsInstance(down_sampler, DownSamplingDense) + self.assertEqual(down_sampler.proc_params["shape_in"], (240, 180)) + self.assertEqual(down_sampler.proc_params["down_sampling_method"], DownSamplingMethodDense.CONVOLUTION) + self.assertEqual(down_sampler.proc_params["down_sampling_factor"], 8) + + def test_invalid_shape_in_negative_width_or_height(self): + """Checks if an error is raised when a negative width or height + for shape_in is given.""" + with(self.assertRaises(ValueError)): + _ = DownSamplingDense(shape_in=(-240, 180), + down_sampling_method=DownSamplingMethodDense.CONVOLUTION, + down_sampling_factor=8) + + with(self.assertRaises(ValueError)): + _ = DownSamplingDense(shape_in=(240, -180), + down_sampling_method=DownSamplingMethodDense.CONVOLUTION, + down_sampling_factor=8) + + def test_invalid_shape_in_decimal_width_or_height(self): + """Checks if an 
error is raised when a decimal width or height + for shape_in is given.""" + with(self.assertRaises(ValueError)): + _ = DownSamplingDense(shape_in=(240.5, 180), + down_sampling_method=DownSamplingMethodDense.CONVOLUTION, + down_sampling_factor=8) + + with(self.assertRaises(ValueError)): + _ = DownSamplingDense(shape_in=(240, 180.5), + down_sampling_method=DownSamplingMethodDense.CONVOLUTION, + down_sampling_factor=8) + + def test_invalid_shape_in_dimension(self): + """Checks if an error is raised when a 1d or 4d input shape is given.""" + with(self.assertRaises(ValueError)): + _ = DownSamplingDense(shape_in=(240,), + down_sampling_method=DownSamplingMethodDense.CONVOLUTION, + down_sampling_factor=8) + + with(self.assertRaises(ValueError)): + _ = DownSamplingDense(shape_in=(240, 180, 2, 1), + down_sampling_method=DownSamplingMethodDense.CONVOLUTION, + down_sampling_factor=8) + + def test_invalid_shape_in_third_dimension_not_2(self): + """Checks if an error is raised if the value of the 3rd dimension + for the shape_in parameter is not 2.""" + with(self.assertRaises(ValueError)): + _ = DownSamplingDense(shape_in=(240, 180, 1), + down_sampling_method=DownSamplingMethodDense.CONVOLUTION, + down_sampling_factor=8) + + def test_invalid_down_sampling_factor_negative(self): + """Checks if an error is raised if the given down sampling factor + is negative.""" + with(self.assertRaises(ValueError)): + _ = DownSamplingDense(shape_in=(240, 180), + down_sampling_method=DownSamplingMethodDense.CONVOLUTION, + down_sampling_factor=-8) + + def test_invalid_down_sampling_factor_decimal(self): + """Checks if an error is raised if the given down sampling factor is decimal.""" + with(self.assertRaises(ValueError)): + _ = DownSamplingDense(shape_in=(240, 180), + down_sampling_method=DownSamplingMethodDense.CONVOLUTION, + down_sampling_factor=8.5) + + def test_invalid_down_sampling_method(self): + """Checks if an error is raised if the given down sampling method is not of type + DownSamplingMethodDense.""" + with(self.assertRaises(TypeError)): + _ = DownSamplingDense(shape_in=(240, 180), + down_sampling_method="convolution", + down_sampling_factor=8) + + +# TODO (GK): Add tests for widths and heights not divisible by +# TODO (GK): down_sampling_factor +class TestProcessModelDownSamplingDense(unittest.TestCase): + def test_init(self): + proc_params = { + "shape_in": (240, 180), + "down_sampling_method": DownSamplingMethodDense.SKIPPING, + "down_sampling_factor": 8 + } + + pm = DownSamplingDensePM(proc_params) + + self.assertIsInstance(pm, DownSamplingDensePM) + self.assertEqual(pm._shape_in, proc_params["shape_in"]) + self.assertEqual(pm._down_sampling_method, + proc_params["down_sampling_method"]) + self.assertEqual(pm._down_sampling_factor, + proc_params["down_sampling_factor"]) + + def test_run(self): + data = np.zeros((8, 8)) + + send_dense = SendDense(shape=(8, 8), data=data) + down_sampler = DownSamplingDense(shape_in=(8, 8), + down_sampling_method=DownSamplingMethodDense.SKIPPING, + down_sampling_factor=2) + + send_dense.out_port.connect(down_sampler.in_port) + + # Run parameters + num_steps = 1 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=num_steps) + + # Running + down_sampler.run(condition=run_cnd, run_cfg=run_cfg) + + # Stopping + down_sampler.stop() + + self.assertFalse(down_sampler.runtime._is_running) + + def test_down_sampling_skipping(self): + data = np.zeros((8, 8)) + data[0, 0] = 1 + data[1, 2] = 1 + data[2, 1] = 1 + + data[1, 5] = 1 + data[2, 7] = 1 + + data[4, 4] = 1 + + 
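+        # With down_sampling_factor=4, SKIPPING keeps only rows/columns 0 and 4
+        # of the 8x8 input, so of the six events above only (0, 0) and (4, 4)
+        # survive, landing at (0, 0) and (1, 1) in the 2x2 output.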
expected_data = np.zeros((2, 2)) + expected_data[0, 0] = 1 + expected_data[1, 1] = 1 + + send_dense = SendDense(shape=(8, 8), data=data) + down_sampler = DownSamplingDense(shape_in=(8, 8), + down_sampling_method=DownSamplingMethodDense.SKIPPING, + down_sampling_factor=4) + recv_dense = RecvDense(shape=(2, 2)) + + send_dense.out_port.connect(down_sampler.in_port) + down_sampler.out_port.connect(recv_dense.in_port) + + # Run parameters + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + # Running + send_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + send_dense.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + def test_down_sampling_max_pooling(self): + data = np.zeros((8, 8)) + data[0, 0] = 1 + data[1, 2] = 1 + data[2, 1] = 1 + + data[1, 5] = 1 + data[2, 7] = 1 + + data[4, 4] = 1 + + expected_data = np.zeros((2, 2)) + expected_data[0, 0] = 1 + expected_data[0, 1] = 1 + expected_data[1, 1] = 1 + + send_dense = SendDense(shape=(8, 8), data=data) + down_sampler = DownSamplingDense(shape_in=(8, 8), + down_sampling_method=DownSamplingMethodDense.MAX_POOLING, + down_sampling_factor=4) + recv_dense = RecvDense(shape=(2, 2)) + + send_dense.out_port.connect(down_sampler.in_port) + down_sampler.out_port.connect(recv_dense.in_port) + + # Run parameters + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + # Running + send_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + send_dense.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + def test_down_sampling_convolution(self): + data = np.zeros((8, 8)) + data[0, 0] = 1 + data[1, 2] = 1 + data[2, 1] = 1 + + data[1, 5] = 1 + data[2, 7] = 1 + + data[4, 4] = 1 + + expected_data = np.zeros((2, 2)) + expected_data[0, 0] = 3 + expected_data[0, 1] = 2 + expected_data[1, 1] = 1 + + send_dense = SendDense(shape=(8, 8), data=data) + down_sampler = DownSamplingDense(shape_in=(8, 8), + down_sampling_method=DownSamplingMethodDense.CONVOLUTION, + down_sampling_factor=4) + recv_dense = RecvDense(shape=(2, 2)) + + send_dense.out_port.connect(down_sampler.in_port) + down_sampler.out_port.connect(recv_dense.in_port) + + # Run parameters + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + # Running + send_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + send_dense.stop() + + # TODO : REMOVE THIS AFTER DEBUG + fig, (ax1, ax2, ax3) = plt.subplots(1, 3) + fig.suptitle('Max pooling') + ax1.imshow(data) + ax1.set_title("Data") + ax2.imshow(expected_data) + ax2.set_title("Expected data") + ax3.imshow(sent_and_received_data) + ax3.set_title("Actual data") + fig.show() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py b/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py new file mode 100644 index 000000000..fb4cd6e77 --- /dev/null +++ b/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py @@ -0,0 +1,20 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ +import unittest + + +class TestProcessFlattening(unittest.TestCase): + def test_init(self): + """Test description""" + pass + + +class TestProcessModelFlattening(unittest.TestCase): + def 
test_init(self): + """Test description""" + pass + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py b/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py new file mode 100644 index 000000000..542b051b6 --- /dev/null +++ b/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py @@ -0,0 +1,295 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ +import unittest +from lava.proc.event_data.event_pre_processor.sparse_to_dense.sparse_to_dense import SparseToDense, SparseToDensePM + +import numpy as np + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.run_configs import Loihi1SimCfg + +import matplotlib.pyplot as plt + + +class RecvDense(AbstractProcess): + def __init__(self, + shape: tuple) -> None: + super().__init__(shape=shape) + + self.in_port = InPort(shape=shape) + + self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) + + +@implements(proc=RecvDense, protocol=LoihiProtocol) +@requires(CPU) +class PyRecvDensePM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + + data: np.ndarray = LavaPyType(np.ndarray, int) + + def run_spk(self) -> None: + data = self.in_port.recv() + + self.data = data + + +class SendSparse(AbstractProcess): + def __init__(self, + shape: tuple, + data: np.ndarray, + indices: np.ndarray) -> None: + super().__init__(shape=shape, data=data, indices=indices) + + self.out_port = OutPort(shape=shape) + + +@implements(proc=SendSparse, protocol=LoihiProtocol) +@requires(CPU) +class PySendSparsePM(PyLoihiProcessModel): + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) + + def __init__(self, proc_params): + super().__init__(proc_params) + self._data = proc_params["data"] + self._indices = proc_params["indices"] + + def run_spk(self) -> None: + data = self._data + idx = self._indices + + self.out_port.send(data, idx) + + + + +class TestProcessSparseToDense(unittest.TestCase): + def test_init_2d(self): + """Tests instantiation of SparseToDense for a 2D output""" + sparse_to_dense = SparseToDense(shape_in=(43200,), + shape_out=(240, 180)) + + self.assertIsInstance(sparse_to_dense, SparseToDense) + self.assertEqual(sparse_to_dense.proc_params["shape_in"], (43200,)) + self.assertEqual(sparse_to_dense.proc_params["shape_out"], (240, 180)) + + def test_init_3d(self): + """Tests instantiation of SparseToDense for a 3D output""" + sparse_to_dense = SparseToDense(shape_in=(43200,), + shape_out=(240, 180, 2)) + + self.assertIsInstance(sparse_to_dense, SparseToDense) + self.assertEqual(sparse_to_dense.proc_params["shape_in"], (43200,)) + self.assertEqual(sparse_to_dense.proc_params["shape_out"], (240, 180, 2)) + + def test_invalid_shape_out_dimension(self): + """Check if an error is raised when shape_out is 1D or 4D""" + # TODO: should it rather raise not 
implemented error? + with(self.assertRaises(ValueError)): + _ = SparseToDense(shape_in=(43200,), + shape_out=(240,)) + + with(self.assertRaises(ValueError)): + _ = SparseToDense(shape_in=(43200,), + shape_out=(240, 180, 2, 1)) + + def test_invalid_shape_in_dimension(self): + """Check if an error is raised when shape_in is 2D""" + with(self.assertRaises(ValueError)): + _ = SparseToDense(shape_in=(43200, 1), + shape_out=(240, 180)) + + def test_invalid_shape_in_third_dimension_not_2(self): + """Checks if an error is raised if the value of the 3rd dimension + for the shape_out parameter is not 2""" + with(self.assertRaises(ValueError)): + _ = SparseToDense(shape_in=(43200,), + shape_out=(240, 180, 1)) + + def test_invalid_shape_in_negative_integer(self): + """Checks if an error is raised when a negative integer for shape_in + is given""" + with(self.assertRaises(ValueError)): + _ = SparseToDense(shape_in=(-43200,), + shape_out=(240, 180)) + + def test_invalid_shape_in_decimal(self): + """Checks if an error is raised when a decimal integer for shape_in + is given""" + with(self.assertRaises(ValueError)): + _ = SparseToDense(shape_in=(43200.5,), + shape_out=(240, 180)) + + def test_invalid_shape_out_negative_width_or_height(self): + """Checks if an error is raised when a negative width or height for + shape_out is given""" + with(self.assertRaises(ValueError)): + _ = SparseToDense(shape_in=(43200,), + shape_out=(-240, 180)) + + with(self.assertRaises(ValueError)): + _ = SparseToDense(shape_in=(43200,), + shape_out=(240, -180)) + + def test_invalid_shape_out_decimal_width_or_height(self): + """Checks if an error is raised when a decimal width or height for + shape_out is given""" + with(self.assertRaises(ValueError)): + _ = SparseToDense(shape_in=(43200,), + shape_out=(240.5, 180)) + + with(self.assertRaises(ValueError)): + _ = SparseToDense(shape_in=(43200,), + shape_out=(240, 180.5)) + + +class TestProcessModelSparseToDense(unittest.TestCase): + def test_init(self): + proc_params = { + "shape_out": (240, 180) + } + + pm = SparseToDensePM(proc_params) + + self.assertIsInstance(pm, SparseToDensePM) + self.assertEqual(pm._shape_out, proc_params["shape_out"]) + + def test_run(self): + data = np.array([1, 1, 1, 1, 1, 1]) + xs = [0, 1, 2, 1, 2, 4] + ys = [0, 2, 1, 5, 7, 4] + indices = np.ravel_multi_index((xs, ys), (8, 8)) + + send_sparse = SendSparse(shape=(10, ), data=data, indices=indices) + sparse_to_dense = SparseToDense(shape_in=(10, ), + shape_out=(8, 8)) + + send_sparse.out_port.connect(sparse_to_dense.in_port) + + # Run parameters + num_steps = 1 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=num_steps) + + # Running + sparse_to_dense.run(condition=run_cnd, run_cfg=run_cfg) + + # Stopping + sparse_to_dense.stop() + + self.assertFalse(sparse_to_dense.runtime._is_running) + + def test_2d(self): + data = np.array([1, 1, 1, 1, 1, 1]) + xs = [0, 1, 2, 1, 2, 4] + ys = [0, 2, 1, 5, 7, 4] + indices = np.ravel_multi_index((xs, ys), (8, 8)) + + expected_data = np.zeros((8, 8)) + expected_data[0, 0] = 1 + expected_data[1, 2] = 1 + expected_data[2, 1] = 1 + + expected_data[1, 5] = 1 + expected_data[2, 7] = 1 + + expected_data[4, 4] = 1 + + send_sparse = SendSparse(shape=(10, ), data=data, indices=indices) + sparse_to_dense = SparseToDense(shape_in=(10, ), + shape_out=(8, 8)) + recv_dense = RecvDense(shape=(8, 8)) + + send_sparse.out_port.connect(sparse_to_dense.in_port) + sparse_to_dense.out_port.connect(recv_dense.in_port) + + # Run parameters + num_steps = 1 + run_cfg = Loihi1SimCfg() + 
run_cnd = RunSteps(num_steps=num_steps) + + # Running + sparse_to_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + # Stopping + sparse_to_dense.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + def test_3d(self): + data = np.array([1, 0, 1, 0, 1, 0]) + xs = [0, 1, 2, 1, 2, 4] + ys = [0, 2, 1, 5, 7, 4] + indices = np.ravel_multi_index((xs, ys), (8, 8)) + + expected_data = np.zeros((8, 8, 2)) + expected_data[0, 0, 1] = 1 + expected_data[1, 2, 0] = 1 + expected_data[2, 1, 1] = 1 + + expected_data[1, 5, 0] = 1 + expected_data[2, 7, 1] = 1 + + expected_data[4, 4, 0] = 1 + + send_sparse = SendSparse(shape=(10,), data=data, indices=indices) + sparse_to_dense = SparseToDense(shape_in=(10,), + shape_out=(8, 8, 2)) + recv_dense = RecvDense(shape=(8, 8, 2)) + + send_sparse.out_port.connect(sparse_to_dense.in_port) + sparse_to_dense.out_port.connect(recv_dense.in_port) + + # Run parameters + num_steps = 1 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=num_steps) + + # Running + sparse_to_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + # Stopping + sparse_to_dense.stop() + + # # TODO : REMOVE THIS AFTER DEBUG + # expected_data_im = np.zeros((8, 8)) + # expected_data_im[expected_data[:, :, 0] == 1] = -1 + # expected_data_im[expected_data[:, :, 1] == 1] = 1 + # actual_data_im = np.zeros((8, 8)) + # actual_data_im[sent_and_received_data[:, :, 0] == 1] = -1 + # actual_data_im[sent_and_received_data[:, :, 1] == 1] = 1 + # + # fig, (ax1, ax2) = plt.subplots(1, 2) + # fig.suptitle('3D') + # ax1.imshow(expected_data_im) + # ax1.set_title("Expected data") + # ax2.imshow(actual_data_im) + # ax2.set_title("Actual data") + # + # fig.show() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py b/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py new file mode 100644 index 000000000..0967f8669 --- /dev/null +++ b/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py @@ -0,0 +1,174 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ +import unittest +from lava.proc.event_data.event_pre_processor.sparse_to_sparse.binary_to_unary_polarity import \ + BinaryToUnaryPolarity, BinaryToUnaryPolarityPM + +import numpy as np + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.run_configs import Loihi1SimCfg + + +class RecvSparse(AbstractProcess): + def __init__(self, + shape: tuple) -> None: + super().__init__(shape=shape) + + self.in_port = InPort(shape=shape) + + self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) + self.idx = Var(shape=shape, init=np.zeros(shape, dtype=int)) + + 
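+# The ProcessModel below zero-pads the received sparse (data, idx) vectors to
+# the full port width so they fit the fixed-shape Vars declared on RecvSparse
+# and can be read back with .get() after each time step.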
+@implements(proc=RecvSparse, protocol=LoihiProtocol) +@requires(CPU) +class PyRecvSparsePM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) + + data: np.ndarray = LavaPyType(np.ndarray, int) + idx: np.ndarray = LavaPyType(np.ndarray, int) + + def run_spk(self) -> None: + data, idx = self.in_port.recv() + + self.data = np.pad(data, + pad_width=( + 0, self.in_port.shape[0] - data.shape[0])) + self.idx = np.pad(idx, + pad_width=( + 0, self.in_port.shape[0] - data.shape[0])) + + +class SendSparse(AbstractProcess): + def __init__(self, + shape: tuple, + data: np.ndarray, + indices: np.ndarray) -> None: + super().__init__(shape=shape, data=data, indices=indices) + + self.out_port = OutPort(shape=shape) + + +@implements(proc=SendSparse, protocol=LoihiProtocol) +@requires(CPU) +class PySendSparsePM(PyLoihiProcessModel): + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) + + def __init__(self, proc_params): + super().__init__(proc_params) + self._data = proc_params["data"] + self._indices = proc_params["indices"] + + def run_spk(self) -> None: + data = self._data + idx = self._indices + + self.out_port.send(data, idx) + + +class TestProcessBinaryToUnaryPolarity(unittest.TestCase): + def test_init(self): + """Tests instantiation of BinaryToUnaryPolarity""" + converter = BinaryToUnaryPolarity(shape=(43200,)) + + self.assertIsInstance(converter, BinaryToUnaryPolarity) + self.assertEqual(converter.proc_params["shape"], (43200,)) + + def test_invalid_shape_out_dimension(self): + """Test for an invalid shape""" + with(self.assertRaises(ValueError)): + _ = BinaryToUnaryPolarity(shape=(240, 180)) + + def test_negative_width(self): + """Tests for a negative width given""" + with(self.assertRaises(ValueError)): + _ = BinaryToUnaryPolarity(shape=(-43200,)) + + def test_decimal_width(self): + """Tests for a decimal width given""" + with(self.assertRaises(ValueError)): + _ = BinaryToUnaryPolarity(shape=(43200.5,)) + + +class TestProcessModelBinaryToUnaryPolarity(unittest.TestCase): + def test_init(self): + proc_params = { + "shape": (10,) + } + + pm = BinaryToUnaryPolarityPM(proc_params) + + self.assertIsInstance(pm, BinaryToUnaryPolarityPM) + + def test_binary_to_unary_polarity_encoding(self): + data = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0]) + indices = np.array([1, 5, 4, 3, 3, 2, 0, 1, 0]) + + expected_data = data + expected_data[expected_data == 0] = 1 + + expected_indices = indices + + send_sparse = SendSparse(shape=(10, ), data=data, indices=indices) + binary_to_unary_encoder = BinaryToUnaryPolarity(shape=(10,)) + recv_sparse = RecvSparse(shape=(10, )) + + send_sparse.out_port.connect(binary_to_unary_encoder.in_port) + binary_to_unary_encoder.out_port.connect(recv_sparse.in_port) + + # Run parameters + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + # Running + send_sparse.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_sparse.data.get()[:expected_data.shape[0]] + sent_and_received_indices = \ + recv_sparse.idx.get()[:expected_indices.shape[0]] + + # Stopping + send_sparse.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + np.testing.assert_equal(sent_and_received_indices, + expected_indices) + + def test_run(self): + data = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0]) + indices = np.array([1, 5, 4, 3, 3, 2, 0, 1, 0]) + + send_sparse = SendSparse(shape=(10,), data=data, indices=indices) + binary_to_unary_encoder = BinaryToUnaryPolarity(shape=(10,)) + + 
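+        # No receiving Process is attached here; this test only checks that
+        # the network runs for one time step and stops cleanly.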
send_sparse.out_port.connect(binary_to_unary_encoder.in_port) + + # Run parameters + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + # Running + binary_to_unary_encoder.run(condition=run_cnd, run_cfg=run_cfg) + + binary_to_unary_encoder.stop() + + self.assertFalse(binary_to_unary_encoder.runtime._is_running) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/proc/event_data/test_integration.py b/tests/lava/proc/event_data/test_integration.py new file mode 100644 index 000000000..e194c1d56 --- /dev/null +++ b/tests/lava/proc/event_data/test_integration.py @@ -0,0 +1,161 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest + +from lava.proc.event_data.event_pre_processor.utils import DownSamplingMethodDense + +from lava.proc.event_data.event_data_loader.aedat_data_loader import AedatDataLoader +from lava.proc.event_data.event_pre_processor.sparse_to_sparse.binary_to_unary_polarity \ + import BinaryToUnaryPolarity +from lava.proc.event_data.event_pre_processor.sparse_to_dense.sparse_to_dense import \ + SparseToDense +from lava.proc.event_data.event_pre_processor.dense_to_dense.down_sampling_dense import \ + DownSamplingDense + +import numpy as np + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.run_configs import Loihi1SimCfg + +import matplotlib.pyplot as plt + + +class RecvDense(AbstractProcess): + def __init__(self, + shape: tuple) -> None: + super().__init__(shape=shape) + + self.in_port = InPort(shape=shape) + + self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) + + +@implements(proc=RecvDense, protocol=LoihiProtocol) +@requires(CPU) +class PyRecvDensePM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + + data: np.ndarray = LavaPyType(np.ndarray, int) + + def run_spk(self) -> None: + data = self.in_port.recv() + + self.data = data + + +class TestEventDataIntegration(unittest.TestCase): + def test_integration(self): + x_history = [ + [8, 12, 13, 13, 13, 9, 14, 14, 13, 13, 8, 9, 9, 13, 9], + [8, 9, 12, 7, 12, 12, 20, 19, 10], + [39, 12, 13, 8, 16, 7, 7, 7, 7, 16, 7, 12, 7, 7, 12], + [10, 7, 10, 8, 8, 7], + [2], + [12, 10, 7], + [22], + [9], + [21] + ] + y_history = [ + [157, 148, 146, 156, 158, 167, 122, 113, 149, 148, 156, + 109, 107, 160, 160], + [160, 112, 137, 128, 130, 145, 104, 99, 111], + [118, 141, 131, 161, 102, 104, 119, 126, 124, 103, 130, + 129, 141, 102, 133], + [110, 122, 109, 122, 166, 121], + [104], + [163, 108, 133], + [102], + [172], + [109] + ] + seed_rng = 0 + rng = np.random.default_rng(seed=seed_rng) + + # AedatDataLoader parameters + adl_file_path = "dvs_recording.aedat4" + adl_max_num_events_out = 10 + adl_shape_out = (adl_max_num_events_out,) + # BinaryToUnaryPolarity parameters + btup_shape = adl_shape_out + # SparseToDense parameters + std_shape_in = btup_shape + std_width_out = 240 + std_height_out = 180 + std_shape_out = (std_width_out, 
std_height_out) + # DownSamplingDense parameters + dss_shape_in = std_shape_out + dss_down_sampling_method = DownSamplingMethodDense.MAX_POOLING + dss_down_sampling_factor = 1 + # RecvDense parameters + rd_shape = (dss_shape_in[0] // dss_down_sampling_factor, + dss_shape_in[1] // dss_down_sampling_factor) + + # Instantiating Processes + aedat_data_loader = AedatDataLoader(file_path=adl_file_path, + shape_out=adl_shape_out, + seed_sub_sampling=seed_rng) + binary_to_unary_polarity = BinaryToUnaryPolarity(shape=btup_shape) + sparse_to_dense = SparseToDense(shape_in=std_shape_in, + shape_out=std_shape_out) + down_sampling_dense = DownSamplingDense( + shape_in=dss_shape_in, + down_sampling_method=dss_down_sampling_method, + down_sampling_factor=dss_down_sampling_factor + ) + recv_dense = RecvDense(shape=rd_shape) + + # Connecting Processes + aedat_data_loader.out_port.connect(binary_to_unary_polarity.in_port) + binary_to_unary_polarity.out_port.connect(sparse_to_dense.in_port) + sparse_to_dense.out_port.connect(down_sampling_dense.in_port) + down_sampling_dense.out_port.connect(recv_dense.in_port) + + # Run parameters + num_steps = 9 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + # Running + for i in range(num_steps): + aedat_data_loader.run(condition=run_cnd, run_cfg=run_cfg) + + xs = np.array(x_history[i]) + ys = np.array(y_history[i]) + + sent_and_received_data = \ + recv_dense.data.get().astype(int) + + if xs.shape[0] > adl_max_num_events_out: + data_idx_array = np.arange(0, xs.shape[0]) + sampled_idx = rng.choice(data_idx_array, + adl_max_num_events_out, + replace=False) + + xs = xs[sampled_idx] + ys = ys[sampled_idx] + + expected_data = np.zeros(std_shape_out) + expected_data[xs, ys] = 1 + + np.testing.assert_equal(sent_and_received_data, expected_data) + + # Stopping + aedat_data_loader.stop() + + +if __name__ == '__main__': + unittest.main() From 77dc9654159351e27eccca1d829ba31d2fa0a837 Mon Sep 17 00:00:00 2001 From: gkarray Date: Fri, 2 Dec 2022 14:36:07 +0100 Subject: [PATCH 02/32] Adding Flattening implementation --- .../dense_to_dense/flattening.py | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py index 9409e3134..ce319707a 100644 --- a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py +++ b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py @@ -16,34 +16,30 @@ from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel +import math + class Flattening(AbstractProcess): def __init__(self, shape_in: tuple, - shape_out: tuple, **kwargs) -> None: super().__init__(shape_in=shape_in, - shape_out=shape_out, **kwargs) - raise NotImplementedError() - # TODO: Validation - self.in_port = InPort() - self.out_port = OutPort() + shape_out = (math.prod(shape_in),) + + self.in_port = InPort(shape_in) + self.out_port = OutPort(shape_out) @implements(proc=Flattening, protocol=LoihiProtocol) @requires(CPU) class FlatteningPM(PyLoihiProcessModel): - in_port: PyOutPort = LavaPyType(PyInPort.VEC_DENSE, int) + in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) - def __init__(self, proc_params: dict) -> None: - super().__init__(proc_params) - raise NotImplementedError() - def run_spk(self) -> None: - raise NotImplementedError() - + 
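+        # Receive the dense frame and send it out flattened to 1D.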
data = self.in_port.recv() + self.out_port.send(data.flatten()) From d1ac4f5fb8c56bf98e48d959a3146c5b3d94b7a2 Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Fri, 2 Dec 2022 15:46:45 +0100 Subject: [PATCH 03/32] flattening proc, pm + all unit tests --- .../dense_to_dense/flattening.py | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py index ce319707a..755798071 100644 --- a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py +++ b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py @@ -28,11 +28,34 @@ def __init__(self, # TODO: Validation + self._validate_shape_in(shape_in) + shape_out = (math.prod(shape_in),) self.in_port = InPort(shape_in) self.out_port = OutPort(shape_out) + @staticmethod + def _validate_shape_in(shape_in): + if not (len(shape_in) == 2 or len(shape_in) == 3): + raise ValueError(f"shape_in should be 2 or 3 dimensional. " + f"{shape_in} given.") + + if not isinstance(shape_in[0], int) or not isinstance(shape_in[1], int): + raise ValueError(f"Width and height of shape_in should be integers." + f"{shape_in} given.") + if len(shape_in) == 3: + if shape_in[2] != 2: + raise ValueError(f"Third dimension of shape_in should be " + f"equal to 2." + f"{shape_in} given.") + + if shape_in[0] <= 0 or shape_in[1] <= 0: + raise ValueError(f"Width and height of shape_in should be positive." + f"{shape_in} given.") + + return shape_in + @implements(proc=Flattening, protocol=LoihiProtocol) @requires(CPU) @@ -40,6 +63,10 @@ class FlatteningPM(PyLoihiProcessModel): in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + def __init__(self, proc_params: dict) -> None: + super().__init__(proc_params) + self._shape_in = proc_params["shape_in"] + def run_spk(self) -> None: data = self.in_port.recv() self.out_port.send(data.flatten()) From 5a89aa623ab076a3e2f05b3a4e9f7bb27298703b Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Fri, 2 Dec 2022 15:48:22 +0100 Subject: [PATCH 04/32] flattening unit tests --- .../dense_to_dense/test_flattening.py | 188 +++++++++++++++++- 1 file changed, 184 insertions(+), 4 deletions(-) diff --git a/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py b/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py index fb4cd6e77..dde17f9ac 100644 --- a/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py +++ b/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py @@ -2,18 +2,198 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ import unittest +from lava.proc.event_data.event_pre_processor.dense_to_dense.flattening import Flattening, FlatteningPM +import numpy as np + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel + +from lava.magma.core.run_conditions import RunSteps 
+from lava.magma.core.run_configs import Loihi1SimCfg + +import matplotlib.pyplot as plt + +class RecvDense(AbstractProcess): + def __init__(self, + shape: tuple) -> None: + super().__init__(shape=shape) + + self.in_port = InPort(shape=shape) + + self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) + + +@implements(proc=RecvDense, protocol=LoihiProtocol) +@requires(CPU) +class PyRecvDensePM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + + data: np.ndarray = LavaPyType(np.ndarray, int) + + def run_spk(self) -> None: + data = self.in_port.recv() + + self.data = data + + +class SendDense(AbstractProcess): + def __init__(self, + shape: tuple, + data: np.ndarray) -> None: + super().__init__(shape=shape, data=data) + + self.out_port = OutPort(shape=shape) + + +@implements(proc=SendDense, protocol=LoihiProtocol) +@requires(CPU) +class PySendDensePM(PyLoihiProcessModel): + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + + def __init__(self, proc_params): + super().__init__(proc_params) + self._data = proc_params["data"] + + def run_spk(self) -> None: + data = self._data + + self.out_port.send(data) class TestProcessFlattening(unittest.TestCase): def test_init(self): - """Test description""" - pass + """Tests instantiation of DownSamplingDense.""" + flattener = Flattening(shape_in=(240, 180)) + + self.assertIsInstance(flattener, Flattening) + self.assertEqual(flattener.proc_params["shape_in"], (240, 180)) + + def test_invalid_shape_in_negative_width_or_height(self): + """Checks if an error is raised when a negative width or height + for shape_in is given.""" + with(self.assertRaises(ValueError)): + _ = Flattening(shape_in=(-240, 180)) + + with(self.assertRaises(ValueError)): + _ = Flattening(shape_in=(240, -180)) + + def test_invalid_shape_in_decimal_width_or_height(self): + """Checks if an error is raised when a decimal width or height + for shape_in is given.""" + with(self.assertRaises(ValueError)): + _ = Flattening(shape_in=(240.5, 180)) + + with(self.assertRaises(ValueError)): + _ = Flattening(shape_in=(240, 180.5)) + + def test_invalid_shape_in_dimension(self): + """Checks if an error is raised when a 1d or 4d input shape is given.""" + with(self.assertRaises(ValueError)): + _ = Flattening(shape_in=(240,)) + + with(self.assertRaises(ValueError)): + _ = Flattening(shape_in=(240, 180, 2, 1)) + + def test_invalid_shape_in_third_dimension_not_2(self): + """Checks if an error is raised if the value of the 3rd dimension + for the shape_in parameter is not 2.""" + with(self.assertRaises(ValueError)): + _ = Flattening(shape_in=(240, 180, 1)) class TestProcessModelFlattening(unittest.TestCase): def test_init(self): - """Test description""" - pass + proc_params = { + "shape_in": (240, 180) + } + + pm = FlatteningPM(proc_params) + + self.assertIsInstance(pm, FlatteningPM) + self.assertEqual(pm._shape_in, proc_params["shape_in"]) + + def test_run(self): + data = np.zeros((8, 8)) + + send_dense = SendDense(shape=(8, 8), data=data) + flattener = Flattening(shape_in=(8, 8)) + + send_dense.out_port.connect(flattener.in_port) + + # Run parameters + num_steps = 1 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=num_steps) + + # Running + flattener.run(condition=run_cnd, run_cfg=run_cfg) + + # Stopping + flattener.stop() + + self.assertFalse(flattener.runtime._is_running) + + def test_flattening_2d(self): + data = np.zeros((8, 8)) + + expected_data = np.zeros((64,)) + + send_dense = SendDense(shape=(8, 8), data=data) + flattener = 
Flattening(shape_in=(8, 8)) + recv_dense = RecvDense(shape=(64,)) + + send_dense.out_port.connect(flattener.in_port) + flattener.out_port.connect(recv_dense.in_port) + + # Run parameters + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + # Running + send_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + send_dense.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + def test_flattening_3d(self): + data = np.zeros((8, 8, 2)) + + expected_data = np.zeros((128,)) + + send_dense = SendDense(shape=(8, 8, 2), data=data) + flattener = Flattening(shape_in=(8, 8, 2)) + recv_dense = RecvDense(shape=(128,)) + + send_dense.out_port.connect(flattener.in_port) + flattener.out_port.connect(recv_dense.in_port) + + # Run parameters + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + # Running + send_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + send_dense.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) if __name__ == '__main__': From 90184668c9ccc38610e23034874f4056bd67b6a7 Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Mon, 5 Dec 2022 14:05:28 +0100 Subject: [PATCH 05/32] addressed PR comments, still TODOs --- .../event_data_loader/aedat_data_loader.py | 65 +++++------- .../test_aedat_data_loader.py | 98 +++++++++---------- 2 files changed, 76 insertions(+), 87 deletions(-) diff --git a/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py b/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py index fec88f025..8cd7a556d 100644 --- a/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py +++ b/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py @@ -2,13 +2,11 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import typing as ty - from dv import AedatFile import numpy as np -import random -from operator import itemgetter -import os +import os.path +import typing as ty +import warnings from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import OutPort @@ -23,14 +21,13 @@ class AedatDataLoader(AbstractProcess): def __init__(self, + *, file_path: str, - shape_out: tuple, - seed_sub_sampling: int = None, - **kwargs) -> None: + shape_out: ty.Tuple[int], + seed_sub_sampling: ty.Optional[int] = None) -> None: super().__init__(file_path=file_path, shape_out=shape_out, - seed_sub_sampling=seed_sub_sampling, - **kwargs) + seed_sub_sampling=seed_sub_sampling) self._validate_file_path(file_path) self._validate_shape_out(shape_out) @@ -38,38 +35,29 @@ def __init__(self, self.out_port = OutPort(shape=shape_out) @staticmethod - def _validate_file_path(file_path): + def _validate_file_path(file_path: str) -> None: # Checking file extension - if not file_path[-7:] == ".aedat4": - raise ValueError(f"Given file should be an .aedat4 file. " - f"{file_path} given.") + if not file_path.lower().endswith('.aedat4'): + raise ValueError(f"AedatDataLoader currently only supports aedat4 files (*.aedat4). " + f"{file_path} was given.") try: - # Checking file size - if os.stat(file_path).st_size > 0: - return file_path - except FileNotFoundError: + # Checking if file exists + os.path.isfile(file_path) + except FileNotFoundError as error: # Checking file exists - raise FileNotFoundError(f"File not found. {file_path} given.") - - return file_path + raise FileNotFoundError(f"File not found. 
{file_path} given.") # TODO: rewrite this? @staticmethod - def _validate_shape_out(shape_out): - if not isinstance(shape_out[0], int): - raise ValueError(f"Max number of events should be an integer." + def _validate_shape_out(shape_out: ty.Tuple[int]) -> None: + if len(shape_out) != 1: + raise ValueError(f"Shape of the OutPort should have a shape of (n,). " f"{shape_out} given.") if shape_out[0] <= 0: raise ValueError(f"Max number of events should be positive. " f"{shape_out} given.") - if len(shape_out) != 1: - raise ValueError(f"Shape of the OutPort should be 1D. " - f"{shape_out} given.") - - return shape_out - @implements(proc=AedatDataLoader, protocol=LoihiProtocol) @requires(CPU) @@ -95,9 +83,7 @@ def _init_aedat_file(self) -> None: def run_spk(self) -> None: events = self._get_next_event_batch() - xs, ys, ps = events['x'], events['y'], events['polarity'] - - data, indices = self._encode_data_and_indices(xs, ys, ps) + data, indices = self._encode_data_and_indices(events) if data.shape[0] > self._shape_out[0]: # If we have more data than our shape allows, subsample @@ -109,29 +95,32 @@ def _get_next_event_batch(self): try: events = self._stream.__next__() except StopIteration: + # does this reset the iterator and restart the file? add comment for clarification self._init_aedat_file() events = self._stream.__next__() return events def _encode_data_and_indices(self, - xs: np.ndarray, - ys: np.ndarray, - ps: np.ndarray) \ + events: dict) \ -> ty.Tuple[np.ndarray, np.ndarray]: + + xs, ys, ps = events['x'], events['y'], events['polarity'] data = ps indices = np.ravel_multi_index((xs, ys), self._frame_shape) return data, indices - def _sub_sample(self, + def _sub_sample(self, # TODO: make a function, add max_events as parameter data: np.ndarray, indices: np.ndarray) \ -> ty.Tuple[np.ndarray, np.ndarray]: - # TODO: print a warning if subsampling, say how much data has been lost data_idx_array = np.arange(0, data.shape[0]) sampled_idx = self._random_rng.choice(data_idx_array, self._shape_out[0], replace=False) + percent_data_lost = (1 - self._shape_out[0]/data.shape[0])*100 + warnings.warn(f"Out port shape is too small to send all the received data. 
" + f"Around {percent_data_lost}% of the data has been lost.") # TODO: rewrite error message return data[sampled_idx], indices[sampled_idx] diff --git a/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py b/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py index 9be8c4650..b508750ad 100644 --- a/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py +++ b/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py @@ -1,33 +1,34 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import unittest -from lava.proc.event_data.event_data_loader.aedat_data_loader import AedatDataLoader, \ - AedatDataLoaderPM + from dv import AedatFile from dv.AedatFile import _AedatFileEventNumpyPacketIterator - -import typing as ty import numpy as np +import typing as ty +import unittest + +from lava.proc.event_data.event_data_loader.aedat_data_loader import AedatDataLoader, \ + AedatDataLoaderPM from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import InPort, OutPort from lava.magma.core.process.variable import Var +from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg - +# TODO : check that this way to structure imports is ok class RecvSparse(AbstractProcess): def __init__(self, - shape: tuple) -> None: + shape: ty.Tuple[int]) -> None: super().__init__(shape=shape) self.in_port = InPort(shape=shape) @@ -55,7 +56,7 @@ def run_spk(self) -> None: class TestProcessAedatDataLoader(unittest.TestCase): def test_init(self): - """Tests instantiation of AedatDataLoader""" + """Tests instantiation of AedatDataLoader.""" data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", shape_out=(43200,)) @@ -64,38 +65,37 @@ def test_init(self): "../dvs_recording.aedat4") self.assertEqual(data_loader.proc_params["shape_out"], (43200,)) - def test_invalid_file_path_extension(self): - """Tests for invalid file extension""" + def test_unsupported_file_extension_throws_exception(self): + """Tests whether a file_path argument with an unsupported file extension throws an exception.""" with(self.assertRaises(ValueError)): - # Test for .py - _ = AedatDataLoader(file_path="test_aedat_data_loader.py", - shape_out=(43200,)) + AedatDataLoader(file_path="test_aedat_data_loader.py", + shape_out=(43200,)) - def test_invalid_file_path_missing_file(self): - """Tests for missing file""" + def test_missing_file_throws_exception(self): + """Tests whether an exception is thrown when a specified file does not exist.""" with(self.assertRaises(FileNotFoundError)): - _ = AedatDataLoader(file_path="missing_file.aedat4", - shape_out=(43200,)) + AedatDataLoader(file_path="missing_file.aedat4", + shape_out=(43200,)) - def test_invalid_shape_out_dimension(self): - """Test for an invalid shape given to the shape_out param""" + def test_too_many_dimensions_throws_exception(self): + """Tests whether a shape_out argument with too many dimensions throws an exception.""" with(self.assertRaises(ValueError)): - _ = 
AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(240, 180)) + AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(240, 180)) - def test_invalid_shape_out_negative_integer(self): - """Tests for a negative width given""" + def test_negative_width_throws_exception(self): + """Tests whether a shape_out argument with a negative width throws an exception.""" with(self.assertRaises(ValueError)): - _ = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(-43200,)) - - def test_invalid_shape_out_decimal(self): - """Tests for a decimal width given""" - with(self.assertRaises(ValueError)): - _ = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(43200.5,)) + AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(-43200,)) + # def test_invalid_shape_out_decimal(self): + # """Tests for a decimal width given.""" + # with(self.assertRaises(ValueError)): + # AedatDataLoader(file_path="../dvs_recording.aedat4", + # shape_out=(43200.5,)) +# TODO: add doc strings class TestProcessModelAedatDataLoader(unittest.TestCase): def test_init(self): proc_params = { @@ -114,7 +114,22 @@ def test_init(self): self.assertIsInstance(pm._frame_shape, tuple) self.assertIsInstance(pm._random_rng, np.random.Generator) + def test_run_without_sub_sampling(self): + data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(3000,)) + + num_steps = 9 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=num_steps) + + data_loader.run(condition=run_cnd, run_cfg=run_cfg) + + data_loader.stop() + + # TODO: add asserts on the output to show functionality without subsampling + def test_sub_sampling(self): + # TODO: reduce size of this (less timesteps?) data_history = [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], @@ -172,6 +187,7 @@ def test_sub_sampling(self): sampled_idx = rng.choice(data_idx_array, max_num_events, replace=False) + # TODO: assert that after subsampling, the number of events is the maximum. 
Could also hard code expected events expected_data = expected_data[sampled_idx] expected_indices = expected_indices[sampled_idx] @@ -184,23 +200,7 @@ def test_sub_sampling(self): # Stopping data_loader.stop() - def test_run(self): - data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(3000,), - seed_sub_sampling=0) - - # Run parameters - num_steps = 9 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=num_steps) - - # Running - data_loader.run(condition=run_cnd, run_cfg=run_cfg) - - # Stopping - data_loader.stop() - - self.assertFalse(data_loader.runtime._is_running) +# TODO: add another test that runs the process twice with different seeds and asserts that the events have been sampled differently def test_end_of_file(self): data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", From 18fff7d109debecf7da7e215aa448b1b417b2f75 Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Mon, 5 Dec 2022 14:54:07 +0100 Subject: [PATCH 06/32] addressed remaining PR comments, still TODOs --- .../event_data_loader/aedat_data_loader.py | 57 ++++++------ .../dense_to_dense/down_sampling_dense.py | 7 +- .../test_aedat_data_loader.py | 90 ++++++++----------- 3 files changed, 72 insertions(+), 82 deletions(-) diff --git a/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py b/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py index 8cd7a556d..dce3aa7c9 100644 --- a/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py +++ b/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py @@ -10,7 +10,6 @@ from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import OutPort - from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyOutPort from lava.magma.core.model.py.type import LavaPyType @@ -24,10 +23,12 @@ def __init__(self, *, file_path: str, shape_out: ty.Tuple[int], - seed_sub_sampling: ty.Optional[int] = None) -> None: + seed_sub_sampling: ty.Optional[int] = None, + **kwargs) -> None: super().__init__(file_path=file_path, shape_out=shape_out, - seed_sub_sampling=seed_sub_sampling) + seed_sub_sampling=seed_sub_sampling, + **kwargs) self._validate_file_path(file_path) self._validate_shape_out(shape_out) @@ -41,22 +42,19 @@ def _validate_file_path(file_path: str) -> None: raise ValueError(f"AedatDataLoader currently only supports aedat4 files (*.aedat4). " f"{file_path} was given.") - try: - # Checking if file exists - os.path.isfile(file_path) - except FileNotFoundError as error: - # Checking file exists - raise FileNotFoundError(f"File not found. {file_path} given.") # TODO: rewrite this? + # Checking if file exists + if not os.path.isfile(file_path): + raise FileNotFoundError(f"File not found. {file_path} given.") @staticmethod def _validate_shape_out(shape_out: ty.Tuple[int]) -> None: if len(shape_out) != 1: raise ValueError(f"Shape of the OutPort should have a shape of (n,). " - f"{shape_out} given.") + f"{shape_out} was given.") if shape_out[0] <= 0: raise ValueError(f"Max number of events should be positive. 
" - f"{shape_out} given.") + f"{shape_out} was given.") @implements(proc=AedatDataLoader, protocol=LoihiProtocol) @@ -73,8 +71,7 @@ def __init__(self, proc_params: dict) -> None: self._frame_shape = (self._file["events"].size_x, self._file["events"].size_y) - seed_sub_sampling = proc_params["seed_sub_sampling"] - self._random_rng = np.random.default_rng(seed_sub_sampling) + self._seed_sub_sampling = proc_params["seed_sub_sampling"] def _init_aedat_file(self) -> None: self._file = AedatFile(file_name=self._file_path) @@ -85,22 +82,22 @@ def run_spk(self) -> None: data, indices = self._encode_data_and_indices(events) - if data.shape[0] > self._shape_out[0]: - # If we have more data than our shape allows, subsample - data, indices = self._sub_sample(data, indices) + data, indices = sub_sample(data, indices, self._shape_out[0], self._seed_sub_sampling) self.out_port.send(data, indices) def _get_next_event_batch(self): try: + # If end of file, raise StopIteration error. events = self._stream.__next__() except StopIteration: - # does this reset the iterator and restart the file? add comment for clarification + # Reset the iterator and loop back to the start of the file. self._init_aedat_file() events = self._stream.__next__() return events +# TODO: change type annotation of events def _encode_data_and_indices(self, events: dict) \ -> ty.Tuple[np.ndarray, np.ndarray]: @@ -111,16 +108,22 @@ def _encode_data_and_indices(self, return data, indices - def _sub_sample(self, # TODO: make a function, add max_events as parameter - data: np.ndarray, - indices: np.ndarray) \ - -> ty.Tuple[np.ndarray, np.ndarray]: +def sub_sample(data: np.ndarray, + indices: np.ndarray, + max_events: int, + seed_random: ty.Optional[int] = 0) \ + -> ty.Tuple[np.ndarray, np.ndarray]: + # If we have more data than our shape allows, subsample + if data.shape[0] > max_events: + random_rng = np.random.default_rng(seed_random) data_idx_array = np.arange(0, data.shape[0]) - sampled_idx = self._random_rng.choice(data_idx_array, - self._shape_out[0], - replace=False) - percent_data_lost = (1 - self._shape_out[0]/data.shape[0])*100 - warnings.warn(f"Out port shape is too small to send all the received data. " - f"Around {percent_data_lost}% of the data has been lost.") # TODO: rewrite error message + sampled_idx = random_rng.choice(data_idx_array, + max_events, + replace=False) + + warnings.warn(f"Read {data.shape[0]} events. Maximum number of events is {max_events}. 
" + f"Removed {data.shape[0] - max_events} events by subsampling.") return data[sampled_idx], indices[sampled_idx] + else: + return data, indices diff --git a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py index 64925ec4a..f7f83301c 100644 --- a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py +++ b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py @@ -2,11 +2,10 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import typing as ty - import numpy as np -from scipy import signal from numpy.lib.stride_tricks import as_strided +from scipy import signal +import typing as ty from lava.proc.event_data.event_pre_processor.utils import \ DownSamplingMethodDense @@ -24,7 +23,7 @@ class DownSamplingDense(AbstractProcess): def __init__(self, - shape_in: tuple, + shape_in: ty.Tuple[int], down_sampling_method: DownSamplingMethodDense, down_sampling_factor: int, **kwargs) -> None: diff --git a/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py b/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py index b508750ad..fa659e9a8 100644 --- a/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py +++ b/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py @@ -8,23 +8,19 @@ import typing as ty import unittest -from lava.proc.event_data.event_data_loader.aedat_data_loader import AedatDataLoader, \ - AedatDataLoaderPM - from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import InPort, OutPort from lava.magma.core.process.variable import Var - from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel - from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg -# TODO : check that this way to structure imports is ok +from lava.proc.event_data.event_data_loader.aedat_data_loader import AedatDataLoader, \ + AedatDataLoaderPM class RecvSparse(AbstractProcess): def __init__(self, @@ -89,12 +85,6 @@ def test_negative_width_throws_exception(self): AedatDataLoader(file_path="../dvs_recording.aedat4", shape_out=(-43200,)) - # def test_invalid_shape_out_decimal(self): - # """Tests for a decimal width given.""" - # with(self.assertRaises(ValueError)): - # AedatDataLoader(file_path="../dvs_recording.aedat4", - # shape_out=(43200.5,)) - # TODO: add doc strings class TestProcessModelAedatDataLoader(unittest.TestCase): def test_init(self): @@ -112,7 +102,6 @@ def test_init(self): self.assertIsInstance(pm._stream, _AedatFileEventNumpyPacketIterator) self.assertIsInstance(pm._frame_shape, tuple) - self.assertIsInstance(pm._random_rng, np.random.Generator) def test_run_without_sub_sampling(self): data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", @@ -132,27 +121,27 @@ def test_sub_sampling(self): # TODO: reduce size of this (less timesteps?) 
data_history = [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1], - [0], - [1, 1, 1], - [1], - [1], - [1] + # [1, 1, 1, 1, 1, 1, 1, 1, 1], + # [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + # [1, 1, 1, 1, 1, 1], + # [0], + # [1, 1, 1], + # [1], + # [1], + # [1] ] indices_history = [ [1597, 2308, 2486, 2496, 2498, 1787, 2642, 2633, 2489, 2488, 1596, 1729, 1727, 2500, 1780], - [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], - [7138, 2301, 2471, 1601, 2982, 1364, 1379, 1386, 1384, - 2983, 1390, 2289, 1401, 1362, 2293], - [1910, 1382, 1909, 1562, 1606, 1381], - [464], - [2323, 1908, 1393], - [4062], - [1792], - [3889] + # [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], + # [7138, 2301, 2471, 1601, 2982, 1364, 1379, 1386, 1384, + # 2983, 1390, 2289, 1401, 1362, 2293], + # [1910, 1382, 1909, 1562, 1606, 1381], + # [464], + # [2323, 1908, 1393], + # [4062], + # [1792], + # [3889] ] seed_rng = 0 rng = np.random.default_rng(seed=seed_rng) @@ -166,36 +155,35 @@ def test_sub_sampling(self): data_loader.out_port.connect(recv_sparse.in_port) # Run parameters - num_steps = 9 + num_steps = 1 run_cfg = Loihi1SimCfg() run_cnd = RunSteps(num_steps=1) # Running - for i in range(num_steps): - data_loader.run(condition=run_cnd, run_cfg=run_cfg) + data_loader.run(condition=run_cnd, run_cfg=run_cfg) - expected_data = np.array(data_history[i]) - expected_indices = np.array(indices_history[i]) + expected_data = np.array(data_history[0]) + expected_indices = np.array(indices_history[0]) - sent_and_received_data = \ - recv_sparse.data.get()[:expected_data.shape[0]] - sent_and_received_indices = \ - recv_sparse.idx.get()[:expected_indices.shape[0]] + sent_and_received_data = \ + recv_sparse.data.get()[:expected_data.shape[0]] + sent_and_received_indices = \ + recv_sparse.idx.get()[:expected_indices.shape[0]] - if expected_data.shape[0] > max_num_events: - data_idx_array = np.arange(0, expected_data.shape[0]) - sampled_idx = rng.choice(data_idx_array, - max_num_events, - replace=False) - # TODO: assert that after subsampling, the number of events is the maximum. Could also hard code expected events + if expected_data.shape[0] > max_num_events: + data_idx_array = np.arange(0, expected_data.shape[0]) + sampled_idx = rng.choice(data_idx_array, + max_num_events, + replace=False) + # TODO: assert that after subsampling, the number of events is the maximum. 
Could also hard code expected events - expected_data = expected_data[sampled_idx] - expected_indices = expected_indices[sampled_idx] + expected_data = expected_data[sampled_idx] + expected_indices = expected_indices[sampled_idx] - np.testing.assert_equal(sent_and_received_data, - expected_data) - np.testing.assert_equal(sent_and_received_indices, - expected_indices) + np.testing.assert_equal(sent_and_received_data, + expected_data) + np.testing.assert_equal(sent_and_received_indices, + expected_indices) # Stopping data_loader.stop() From ddb3bb753cbd793deb0fa3a6508016460abac9c6 Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Mon, 5 Dec 2022 19:06:30 +0100 Subject: [PATCH 07/32] Applied PR comments for other processes --- .../event_data_loader/aedat_data_loader.py | 2 +- .../dense_to_dense/down_sampling_dense.py | 7 +--- .../dense_to_dense/flattening.py | 24 ++++------- .../sparse_to_dense/sparse_to_dense.py | 42 ++++++------------- .../binary_to_unary_polarity.py | 25 ++++------- 5 files changed, 33 insertions(+), 67 deletions(-) diff --git a/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py b/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py index dce3aa7c9..bc00f5d98 100644 --- a/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py +++ b/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py @@ -49,7 +49,7 @@ def _validate_file_path(file_path: str) -> None: @staticmethod def _validate_shape_out(shape_out: ty.Tuple[int]) -> None: if len(shape_out) != 1: - raise ValueError(f"Shape of the OutPort should have a shape of (n,). " + raise ValueError(f"Shape of the OutPort should be (n,). " f"{shape_out} was given.") if shape_out[0] <= 0: diff --git a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py index f7f83301c..978594028 100644 --- a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py +++ b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py @@ -7,19 +7,16 @@ from scipy import signal import typing as ty -from lava.proc.event_data.event_pre_processor.utils import \ - DownSamplingMethodDense - from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import InPort, OutPort - from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel - +from lava.proc.event_data.event_pre_processor.utils import \ + DownSamplingMethodDense class DownSamplingDense(AbstractProcess): def __init__(self, diff --git a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py index 755798071..d8adef62a 100644 --- a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py +++ b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py @@ -2,13 +2,12 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import typing as ty - +import math import numpy as np +import typing as ty from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import InPort, OutPort - from 
lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType @@ -16,18 +15,16 @@ from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel -import math class Flattening(AbstractProcess): def __init__(self, - shape_in: tuple, + *, + shape_in: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]], **kwargs) -> None: super().__init__(shape_in=shape_in, **kwargs) - # TODO: Validation - self._validate_shape_in(shape_in) shape_out = (math.prod(shape_in),) @@ -36,25 +33,20 @@ def __init__(self, self.out_port = OutPort(shape_out) @staticmethod - def _validate_shape_in(shape_in): + def _validate_shape_in(shape_in: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]]) -> None: if not (len(shape_in) == 2 or len(shape_in) == 3): raise ValueError(f"shape_in should be 2 or 3 dimensional. " - f"{shape_in} given.") + f"{shape_in} was given.") - if not isinstance(shape_in[0], int) or not isinstance(shape_in[1], int): - raise ValueError(f"Width and height of shape_in should be integers." - f"{shape_in} given.") if len(shape_in) == 3: if shape_in[2] != 2: raise ValueError(f"Third dimension of shape_in should be " f"equal to 2." - f"{shape_in} given.") + f"{shape_in} was given.") if shape_in[0] <= 0 or shape_in[1] <= 0: raise ValueError(f"Width and height of shape_in should be positive." - f"{shape_in} given.") - - return shape_in + f"{shape_in} was given.") @implements(proc=Flattening, protocol=LoihiProtocol) diff --git a/src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py b/src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py index e11c8bc68..c6dee060b 100644 --- a/src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py +++ b/src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py @@ -2,13 +2,11 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import typing as ty - import numpy as np +import typing as ty from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import InPort, OutPort - from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType @@ -19,8 +17,9 @@ class SparseToDense(AbstractProcess): def __init__(self, - shape_in: tuple, - shape_out: tuple, + *, + shape_in: ty.Tuple[int], + shape_out: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]], **kwargs) -> None: super().__init__(shape_in=shape_in, shape_out=shape_out, @@ -33,42 +32,27 @@ def __init__(self, self.out_port = OutPort(shape=shape_out) @staticmethod - def _validate_shape_in(shape_in): - if not isinstance(shape_in[0], int): - raise ValueError(f"Width of shape_in should be an integer. " - f"{shape_in} given.") + def _validate_shape_in(shape_in: ty.Tuple[int]) -> None: + if len(shape_in) != 1: + raise ValueError(f"Shape of the InPort should be (n,). " + f"{shape_in} was given.") if shape_in[0] <= 0: raise ValueError(f"Width of shape_in should be positive. {shape_in} given.") - if len(shape_in) != 1: - raise ValueError(f"shape_in should be 1 dimensional. 
{shape_in} given.") - - return shape_in - # test 2d instantiation ok - # test 3d instantiation ok - # test what happens when wanting a non 2-3D out shape - # non 1D in shape - # 3rd dimension not 2 in 3D case - # invalid shapes (decimal, negative) - @staticmethod - def _validate_shape_out(shape_out): + def _validate_shape_out(shape_out: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]]) -> None: if not (len(shape_out) == 2 or len(shape_out) == 3): - raise ValueError(f"shape out should be 2 or 3 dimensional. {shape_out} given.") + raise ValueError(f"shape_out should be 2 or 3 dimensional. {shape_out} given.") - if not isinstance(shape_out[0], int) or not isinstance(shape_out[1], int): - raise ValueError(f"Width and height of the out shape should be integers. " - f"{shape_out} given.") if len(shape_out) == 3: if shape_out[2] != 2: - raise ValueError(f"Depth of the out shape should be an integer and equal to 2. " + raise ValueError(f"Depth of the shape_out argument should be an integer and equal to 2. " f"{shape_out} given.") if shape_out[0] <= 0 or shape_out[1] <= 0: - raise ValueError(f"Width and height of the out shape should be positive. {shape_out} given.") - - return shape_out + raise ValueError(f"Width and height of the shape_out argument should be positive. " + f"{shape_out} given.") @implements(proc=SparseToDense, protocol=LoihiProtocol) diff --git a/src/lava/proc/event_data/event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py b/src/lava/proc/event_data/event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py index 6013c99f1..4536578b9 100644 --- a/src/lava/proc/event_data/event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py +++ b/src/lava/proc/event_data/event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py @@ -2,13 +2,11 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import typing as ty - import numpy as np +import typing as ty from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import InPort, OutPort - from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType @@ -19,9 +17,11 @@ class BinaryToUnaryPolarity(AbstractProcess): def __init__(self, - shape: tuple, + *, + shape: ty.Tuple[int], **kwargs) -> None: - super().__init__(shape=shape, **kwargs) + super().__init__(shape=shape, + **kwargs) self._validate_shape(shape) @@ -29,20 +29,13 @@ def __init__(self, self.out_port = OutPort(shape=shape) @staticmethod - def _validate_shape(shape): - if not isinstance(shape[0], int): - raise ValueError(f"Max number of events should be an integer." - f"{shape} given.") + def _validate_shape(shape: ty.Tuple[int]) -> None: + if len(shape) != 1: + raise ValueError(f"Shape should be (n,). {shape} was given.") if shape[0] <= 0: raise ValueError(f"Max number of events should be positive. " - f"{shape} given.") - - if len(shape) != 1: - raise ValueError(f"Shape of the OutPort should be 1D. 
" - f"{shape} given.") - - return shape + f"{shape} was given.") @implements(proc=BinaryToUnaryPolarity, protocol=LoihiProtocol) From 877ed674b1c659a0158aef6b2c27fe17f8ecd409 Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Mon, 5 Dec 2022 19:32:38 +0100 Subject: [PATCH 08/32] Applied PR comments for other tests, still TODOs --- .../dense_to_dense/test_flattening.py | 51 ++++------ .../sparse_to_dense/test_sparse_to_dense.py | 98 ++++++++----------- .../test_binary_to_unary_polarity.py | 40 ++++---- 3 files changed, 78 insertions(+), 111 deletions(-) diff --git a/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py b/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py index dde17f9ac..fe5744438 100644 --- a/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py +++ b/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py @@ -1,30 +1,28 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import unittest -from lava.proc.event_data.event_pre_processor.dense_to_dense.flattening import Flattening, FlatteningPM import numpy as np +import typing as ty +import unittest from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import InPort, OutPort from lava.magma.core.process.variable import Var - from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel - from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg +from lava.proc.event_data.event_pre_processor.dense_to_dense.flattening import Flattening, FlatteningPM -import matplotlib.pyplot as plt - +# TODO: add doc strings for these processes class RecvDense(AbstractProcess): def __init__(self, - shape: tuple) -> None: + shape: ty.Tuple[int]) -> None: super().__init__(shape=shape) self.in_port = InPort(shape=shape) @@ -47,7 +45,7 @@ def run_spk(self) -> None: class SendDense(AbstractProcess): def __init__(self, - shape: tuple, + shape: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]], data: np.ndarray) -> None: super().__init__(shape=shape, data=data) @@ -76,41 +74,31 @@ def test_init(self): self.assertIsInstance(flattener, Flattening) self.assertEqual(flattener.proc_params["shape_in"], (240, 180)) - def test_invalid_shape_in_negative_width_or_height(self): - """Checks if an error is raised when a negative width or height - for shape_in is given.""" - with(self.assertRaises(ValueError)): - _ = Flattening(shape_in=(-240, 180)) - + def test_negative_width_or_height_throws_exception(self): + """Tests whether an exception is thrown when a negative width or height for the shape_in argument is given.""" with(self.assertRaises(ValueError)): - _ = Flattening(shape_in=(240, -180)) + Flattening(shape_in=(-240, 180)) - def test_invalid_shape_in_decimal_width_or_height(self): - """Checks if an error is raised when a decimal width or height - for shape_in is given.""" with(self.assertRaises(ValueError)): - _ = Flattening(shape_in=(240.5, 180)) + Flattening(shape_in=(240, -180)) + def test_too_few_or_too_many_dimensions_throws_exception(self): + """Tests whether an exception is thrown when a 1d 
or 4d value for the shape_in argument is given.""" with(self.assertRaises(ValueError)): - _ = Flattening(shape_in=(240, 180.5)) + Flattening(shape_in=(240,)) - def test_invalid_shape_in_dimension(self): - """Checks if an error is raised when a 1d or 4d input shape is given.""" with(self.assertRaises(ValueError)): - _ = Flattening(shape_in=(240,)) + Flattening(shape_in=(240, 180, 2, 1)) + def test_third_dimension_not_2_throws_exception(self): + """Tests whether an exception is thrown if the value of the 3rd dimension for the shape_in argument is not 2.""" with(self.assertRaises(ValueError)): - _ = Flattening(shape_in=(240, 180, 2, 1)) - - def test_invalid_shape_in_third_dimension_not_2(self): - """Checks if an error is raised if the value of the 3rd dimension - for the shape_in parameter is not 2.""" - with(self.assertRaises(ValueError)): - _ = Flattening(shape_in=(240, 180, 1)) - + Flattening(shape_in=(240, 180, 1)) +# TODO: add doc strings class TestProcessModelFlattening(unittest.TestCase): def test_init(self): + """Tests instantiation of the Flattening process model""" proc_params = { "shape_in": (240, 180) } @@ -120,6 +108,7 @@ def test_init(self): self.assertIsInstance(pm, FlatteningPM) self.assertEqual(pm._shape_in, proc_params["shape_in"]) + # TODO: can probably be deleted def test_run(self): data = np.zeros((8, 8)) diff --git a/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py b/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py index 542b051b6..056ab7a16 100644 --- a/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py +++ b/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py @@ -1,31 +1,28 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import unittest -from lava.proc.event_data.event_pre_processor.sparse_to_dense.sparse_to_dense import SparseToDense, SparseToDensePM import numpy as np +import typing as ty +import unittest from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import InPort, OutPort from lava.magma.core.process.variable import Var - from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel - from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg +from lava.proc.event_data.event_pre_processor.sparse_to_dense.sparse_to_dense import SparseToDense, SparseToDensePM -import matplotlib.pyplot as plt - - +# TODO: add doc strings class RecvDense(AbstractProcess): def __init__(self, - shape: tuple) -> None: + shape: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]]) -> None: super().__init__(shape=shape) self.in_port = InPort(shape=shape) @@ -48,7 +45,7 @@ def run_spk(self) -> None: class SendSparse(AbstractProcess): def __init__(self, - shape: tuple, + shape: ty.Tuple[int], data: np.ndarray, indices: np.ndarray) -> None: super().__init__(shape=shape, data=data, indices=indices) @@ -73,11 +70,9 @@ def run_spk(self) -> None: self.out_port.send(data, idx) - - class TestProcessSparseToDense(unittest.TestCase): def test_init_2d(self): - """Tests instantiation of SparseToDense for a 
2D output""" + """Tests instantiation of SparseToDense for a 2D output.""" sparse_to_dense = SparseToDense(shape_in=(43200,), shape_out=(240, 180)) @@ -86,7 +81,7 @@ def test_init_2d(self): self.assertEqual(sparse_to_dense.proc_params["shape_out"], (240, 180)) def test_init_3d(self): - """Tests instantiation of SparseToDense for a 3D output""" + """Tests instantiation of SparseToDense for a 3D output.""" sparse_to_dense = SparseToDense(shape_in=(43200,), shape_out=(240, 180, 2)) @@ -94,69 +89,53 @@ def test_init_3d(self): self.assertEqual(sparse_to_dense.proc_params["shape_in"], (43200,)) self.assertEqual(sparse_to_dense.proc_params["shape_out"], (240, 180, 2)) - def test_invalid_shape_out_dimension(self): - """Check if an error is raised when shape_out is 1D or 4D""" - # TODO: should it rather raise not implemented error? - with(self.assertRaises(ValueError)): - _ = SparseToDense(shape_in=(43200,), - shape_out=(240,)) - + def test_too_few_or_too_many_dimensions_shape_out_throws_exception(self): + """Tests whether an exception is thrown when a 1d or 4d value for the shape_out argument is given.""" + # TODO: should the 4D+ case rather raise a NotImplementedError? with(self.assertRaises(ValueError)): - _ = SparseToDense(shape_in=(43200,), - shape_out=(240, 180, 2, 1)) + SparseToDense(shape_in=(43200,), + shape_out=(240,)) - def test_invalid_shape_in_dimension(self): - """Check if an error is raised when shape_in is 2D""" with(self.assertRaises(ValueError)): - _ = SparseToDense(shape_in=(43200, 1), - shape_out=(240, 180)) + SparseToDense(shape_in=(43200,), + shape_out=(240, 180, 2, 1)) - def test_invalid_shape_in_third_dimension_not_2(self): - """Checks if an error is raised if the value of the 3rd dimension - for the shape_out parameter is not 2""" + def test_too_many_dimensions_shape_in_throws_exception(self): + """Tests whether a shape_in argument with too many dimensions throws an exception.""" with(self.assertRaises(ValueError)): - _ = SparseToDense(shape_in=(43200,), - shape_out=(240, 180, 1)) + SparseToDense(shape_in=(43200, 1), + shape_out=(240, 180)) - def test_invalid_shape_in_negative_integer(self): - """Checks if an error is raised when a negative integer for shape_in - is given""" + def test_third_dimension_not_2_throws_exception(self): + """Tests whether an exception is thrown if the value of the 3rd dimension for the + shape_out argument is not 2.""" with(self.assertRaises(ValueError)): - _ = SparseToDense(shape_in=(-43200,), - shape_out=(240, 180)) + SparseToDense(shape_in=(43200,), + shape_out=(240, 180, 1)) - def test_invalid_shape_in_decimal(self): - """Checks if an error is raised when a decimal integer for shape_in - is given""" - with(self.assertRaises(ValueError)): - _ = SparseToDense(shape_in=(43200.5,), - shape_out=(240, 180)) - - def test_invalid_shape_out_negative_width_or_height(self): - """Checks if an error is raised when a negative width or height for - shape_out is given""" - with(self.assertRaises(ValueError)): - _ = SparseToDense(shape_in=(43200,), - shape_out=(-240, 180)) - + def test_negative_width_shape_in_throws_exception(self): + """Tests whether an exception is thrown when a negative integer for the shape_in + argument is given""" with(self.assertRaises(ValueError)): - _ = SparseToDense(shape_in=(43200,), - shape_out=(240, -180)) + SparseToDense(shape_in=(-43200,), + shape_out=(240, 180)) - def test_invalid_shape_out_decimal_width_or_height(self): - """Checks if an error is raised when a decimal width or height for - shape_out is given""" + def 
test_negative_width_or_height_shape_out_throws_exception(self): + """Tests whether an exception is thrown when a negative width or height for the + shape_out argument is given""" with(self.assertRaises(ValueError)): - _ = SparseToDense(shape_in=(43200,), - shape_out=(240.5, 180)) + SparseToDense(shape_in=(43200,), + shape_out=(-240, 180)) with(self.assertRaises(ValueError)): - _ = SparseToDense(shape_in=(43200,), - shape_out=(240, 180.5)) + SparseToDense(shape_in=(43200,), + shape_out=(240, -180)) +#TODO: add doc strings class TestProcessModelSparseToDense(unittest.TestCase): def test_init(self): + """Tests instantiation of the SparseToDense process model.""" proc_params = { "shape_out": (240, 180) } @@ -166,6 +145,7 @@ def test_init(self): self.assertIsInstance(pm, SparseToDensePM) self.assertEqual(pm._shape_out, proc_params["shape_out"]) +# TODO: can be deleted I guess def test_run(self): data = np.array([1, 1, 1, 1, 1, 1]) xs = [0, 1, 2, 1, 2, 4] diff --git a/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py b/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py index 0967f8669..16d7ee112 100644 --- a/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py +++ b/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py @@ -1,30 +1,29 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import unittest -from lava.proc.event_data.event_pre_processor.sparse_to_sparse.binary_to_unary_polarity import \ - BinaryToUnaryPolarity, BinaryToUnaryPolarityPM import numpy as np +import typing as ty +import unittest from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import InPort, OutPort from lava.magma.core.process.variable import Var - from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel - from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg +from lava.proc.event_data.event_pre_processor.sparse_to_sparse.binary_to_unary_polarity import \ + BinaryToUnaryPolarity, BinaryToUnaryPolarityPM - +# TODO: add doc strings for these processes class RecvSparse(AbstractProcess): def __init__(self, - shape: tuple) -> None: + shape: ty.Tuple[int]) -> None: super().__init__(shape=shape) self.in_port = InPort(shape=shape) @@ -54,7 +53,7 @@ def run_spk(self) -> None: class SendSparse(AbstractProcess): def __init__(self, - shape: tuple, + shape: ty.Tuple[int], data: np.ndarray, indices: np.ndarray) -> None: super().__init__(shape=shape, data=data, indices=indices) @@ -81,30 +80,26 @@ def run_spk(self) -> None: class TestProcessBinaryToUnaryPolarity(unittest.TestCase): def test_init(self): - """Tests instantiation of BinaryToUnaryPolarity""" + """Tests instantiation of BinaryToUnaryPolarity.""" converter = BinaryToUnaryPolarity(shape=(43200,)) self.assertIsInstance(converter, BinaryToUnaryPolarity) self.assertEqual(converter.proc_params["shape"], (43200,)) - def test_invalid_shape_out_dimension(self): - """Test for an invalid shape""" + def test_too_many_dimensions_throws_exception(self): + 
"""Tests whether a shape argument with too many dimensions throws an exception.""" with(self.assertRaises(ValueError)): - _ = BinaryToUnaryPolarity(shape=(240, 180)) + BinaryToUnaryPolarity(shape=(240, 180)) - def test_negative_width(self): - """Tests for a negative width given""" + def test_negative_width_throws_exception(self): + """Tests whether a shape argument with a negative width throws an exception.""" with(self.assertRaises(ValueError)): - _ = BinaryToUnaryPolarity(shape=(-43200,)) - - def test_decimal_width(self): - """Tests for a decimal width given""" - with(self.assertRaises(ValueError)): - _ = BinaryToUnaryPolarity(shape=(43200.5,)) - + BinaryToUnaryPolarity(shape=(-43200,)) +# TODO: add doc strings class TestProcessModelBinaryToUnaryPolarity(unittest.TestCase): def test_init(self): + """Tests instantiation of the BinaryToUnary process model.""" proc_params = { "shape": (10,) } @@ -114,6 +109,8 @@ def test_init(self): self.assertIsInstance(pm, BinaryToUnaryPolarityPM) def test_binary_to_unary_polarity_encoding(self): + # TODO: add explanations for the meaning of binary and unary somewhere? explain test variables? + """Tests whether the encoding from binary to unary works correctly.""" data = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0]) indices = np.array([1, 5, 4, 3, 3, 2, 0, 1, 0]) @@ -149,6 +146,7 @@ def test_binary_to_unary_polarity_encoding(self): np.testing.assert_equal(sent_and_received_indices, expected_indices) + # TODO: I guess not needed? should any edge cases be treated? def test_run(self): data = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0]) indices = np.array([1, 5, 4, 3, 3, 2, 0, 1, 0]) From 093605faf277f63f3542d1dc23e8c9544b78981c Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Tue, 6 Dec 2022 17:39:06 +0100 Subject: [PATCH 09/32] doc strings for AedatDataLoader (+tests), events utils, PR comments --- .../event_data_loader/aedat_data_loader.py | 93 ++++++++---- src/lava/utils/events.py | 29 ++++ .../test_aedat_data_loader.py | 134 +++++++++++------- .../dense_to_dense/test_flattening.py | 2 +- .../sparse_to_dense/test_sparse_to_dense.py | 8 +- .../test_binary_to_unary_polarity.py | 8 +- 6 files changed, 186 insertions(+), 88 deletions(-) create mode 100644 src/lava/utils/events.py diff --git a/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py b/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py index bc00f5d98..9fc4d56a8 100644 --- a/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py +++ b/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py @@ -16,9 +16,34 @@ from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.utils.events import sub_sample class AedatDataLoader(AbstractProcess): + """ + Process that reads data from an aedat4 file. + + This process outputs a sparse tensor of the event data stream, meaning + two 1-dimensional vectors containing polarity data and indices. The + data is sub-sampled to fit the given output shape. The process is + implemented such that the reading from file loops back to the beginning + of the file when it reaches the end. + + Parameters + ---------- + file_path : str + Path to the desired aedat4 file. + + shape_out : tuple (shape (n,)) + The shape of the OutPort. The size of this parameter sets a maximum + number of events per time-step, and the process will subsample data + in order to fit it into this port. 
Data which contains fewer events + will be padded with zeros. + + seed_sub_sampling : int, optional + Seed used for the random number generator that sub-samples data to + fit the OutPort. + """ def __init__(self, *, file_path: str, @@ -37,17 +62,23 @@ def __init__(self, @staticmethod def _validate_file_path(file_path: str) -> None: - # Checking file extension + """ + Checks whether the file extension is valid and if the file can + be found. Raises relevant exception if not. + """ if not file_path.lower().endswith('.aedat4'): raise ValueError(f"AedatDataLoader currently only supports aedat4 files (*.aedat4). " f"{file_path} was given.") - # Checking if file exists if not os.path.isfile(file_path): raise FileNotFoundError(f"File not found. {file_path} given.") @staticmethod def _validate_shape_out(shape_out: ty.Tuple[int]) -> None: + """ + Checks whether the given shape is valid and that the size given + is not a negative number. Raises relevant exception if not + """ if len(shape_out) != 1: raise ValueError(f"Shape of the OutPort should be (n,). " f"{shape_out} was given.") @@ -60,6 +91,10 @@ def _validate_shape_out(shape_out: ty.Tuple[int]) -> None: @implements(proc=AedatDataLoader, protocol=LoihiProtocol) @requires(CPU) class AedatDataLoaderPM(PyLoihiProcessModel): + """ + Implementation of the Aedat Data Loader process on Loihi, with sparse + representation of events. + """ out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) def __init__(self, proc_params: dict) -> None: @@ -73,11 +108,12 @@ def __init__(self, proc_params: dict) -> None: self._seed_sub_sampling = proc_params["seed_sub_sampling"] - def _init_aedat_file(self) -> None: - self._file = AedatFile(file_name=self._file_path) - self._stream = self._file["events"].numpy() - def run_spk(self) -> None: + """ + Compiles events into a batch (roughly 10ms long). The polarity data + and x and y values are then used to encode the sparse tensor. The + data is sub-sampled if necessary, and then sent out. + """ events = self._get_next_event_batch() data, indices = self._encode_data_and_indices(events) @@ -87,8 +123,13 @@ def run_spk(self) -> None: self.out_port.send(data, indices) def _get_next_event_batch(self): + """ + Compiles events from the event stream into batches which will be + treated in a single timestep. Once we reach the end of the file, the + process loops back to the start of the file. + """ try: - # If end of file, raise StopIteration error. + # If end of file, raises StopIteration error. events = self._stream.__next__() except StopIteration: # Reset the iterator and loop back to the start of the file. @@ -97,33 +138,23 @@ def _get_next_event_batch(self): return events -# TODO: change type annotation of events + def _init_aedat_file(self) -> None: + """ + Resets the event stream + """ + self._file = AedatFile(file_name=self._file_path) + self._stream = self._file["events"].numpy() + + # TODO: look into the type of "events" def _encode_data_and_indices(self, - events: dict) \ + events: ty.Dict) \ -> ty.Tuple[np.ndarray, np.ndarray]: - + """ + Extracts the polarity data, and x and y indices from the given + batch of events, and encodes them accordingly. 
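(For reference, a minimal standalone sketch of the index encoding described here, outside the patch itself. It assumes a 240 x 180 sensor, consistent with the shapes used in the tests of this PR; the coordinate values are illustrative.)

import numpy as np

frame_shape = (240, 180)              # assumed (width, height) of the sensor
xs = np.array([8, 12, 13])            # event x coordinates
ys = np.array([157, 148, 146])        # event y coordinates

# Flatten (x, y) pairs into 1-D indices, as _encode_data_and_indices does
# with np.ravel_multi_index: index = x * 180 + y for this frame shape.
indices = np.ravel_multi_index((xs, ys), frame_shape)   # -> [1597, 2308, 2486]

# The frame coordinates can be recovered with the inverse operation.
x_back, y_back = np.unravel_index(indices, frame_shape)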
+ """ xs, ys, ps = events['x'], events['y'], events['polarity'] data = ps indices = np.ravel_multi_index((xs, ys), self._frame_shape) - return data, indices - -def sub_sample(data: np.ndarray, - indices: np.ndarray, - max_events: int, - seed_random: ty.Optional[int] = 0) \ - -> ty.Tuple[np.ndarray, np.ndarray]: - # If we have more data than our shape allows, subsample - if data.shape[0] > max_events: - random_rng = np.random.default_rng(seed_random) - data_idx_array = np.arange(0, data.shape[0]) - sampled_idx = random_rng.choice(data_idx_array, - max_events, - replace=False) - - warnings.warn(f"Read {data.shape[0]} events. Maximum number of events is {max_events}. " - f"Removed {data.shape[0] - max_events} events by subsampling.") - - return data[sampled_idx], indices[sampled_idx] - else: - return data, indices + return data, indices \ No newline at end of file diff --git a/src/lava/utils/events.py b/src/lava/utils/events.py new file mode 100644 index 000000000..5018c8399 --- /dev/null +++ b/src/lava/utils/events.py @@ -0,0 +1,29 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np +import typing as ty +import warnings + + +def sub_sample(data: np.ndarray, + indices: np.ndarray, + max_events: int, + seed_random: ty.Optional[int] = 0) \ + -> ty.Tuple[np.ndarray, np.ndarray]: + # If we have more data than our shape allows, subsample + if data.shape[0] > max_events: + random_rng = np.random.default_rng(seed_random) + data_idx_array = np.arange(0, data.shape[0]) + sampled_idx = random_rng.choice(data_idx_array, + max_events, + replace=False) + + percentage_data_lost = (1 - max_events/data.shape[0])*100 + warnings.warn(f"Read {data.shape[0]} events. Maximum number of events is {max_events}. " + f"Removed {data.shape[0] - max_events}({percentage_data_lost}%) events by subsampling.") + + return data[sampled_idx], indices[sampled_idx] + else: + return data, indices \ No newline at end of file diff --git a/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py b/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py index fa659e9a8..a74a12b98 100644 --- a/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py +++ b/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py @@ -23,6 +23,13 @@ AedatDataLoaderPM class RecvSparse(AbstractProcess): + """ + Process that receives arbitrary sparse data. + + Parameters + ---------- + shape: tuple, shape of the process + """ def __init__(self, shape: ty.Tuple[int]) -> None: super().__init__(shape=shape) @@ -42,6 +49,9 @@ class PyRecvSparsePM(PyLoihiProcessModel): idx: np.ndarray = LavaPyType(np.ndarray, int) def run_spk(self) -> None: + """ + Receives the data and pads with zeros to fit them to the port shape. TODO: why? + """ data, idx = self.in_port.recv() self.data = np.pad(data, @@ -52,7 +62,9 @@ def run_spk(self) -> None: class TestProcessAedatDataLoader(unittest.TestCase): def test_init(self): - """Tests instantiation of AedatDataLoader.""" + """ + Tests instantiation of AedatDataLoader. 
+ """ data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", shape_out=(43200,)) @@ -62,25 +74,34 @@ def test_init(self): self.assertEqual(data_loader.proc_params["shape_out"], (43200,)) def test_unsupported_file_extension_throws_exception(self): - """Tests whether a file_path argument with an unsupported file extension throws an exception.""" + """ + Tests whether a file_path argument with an unsupported file extension + throws an exception. + """ with(self.assertRaises(ValueError)): AedatDataLoader(file_path="test_aedat_data_loader.py", shape_out=(43200,)) def test_missing_file_throws_exception(self): - """Tests whether an exception is thrown when a specified file does not exist.""" + """ + Tests whether an exception is thrown when a specified file does not exist. + """ with(self.assertRaises(FileNotFoundError)): AedatDataLoader(file_path="missing_file.aedat4", shape_out=(43200,)) - def test_too_many_dimensions_throws_exception(self): - """Tests whether a shape_out argument with too many dimensions throws an exception.""" + def test_invalid_shape_throws_exception(self): + """ + Tests whether a shape_out argument with an invalid shape throws an exception. + """ with(self.assertRaises(ValueError)): AedatDataLoader(file_path="../dvs_recording.aedat4", shape_out=(240, 180)) - def test_negative_width_throws_exception(self): - """Tests whether a shape_out argument with a negative width throws an exception.""" + def test_negative_size_throws_exception(self): + """ + Tests whether a shape_out argument with a negative size throws an exception. + """ with(self.assertRaises(ValueError)): AedatDataLoader(file_path="../dvs_recording.aedat4", shape_out=(-43200,)) @@ -88,6 +109,9 @@ def test_negative_width_throws_exception(self): # TODO: add doc strings class TestProcessModelAedatDataLoader(unittest.TestCase): def test_init(self): + """ + Tests instantiation of the AedatDataLoader process model. + """ proc_params = { "file_path": "../dvs_recording.aedat4", "shape_out": (3000,), @@ -104,6 +128,11 @@ def test_init(self): self.assertIsInstance(pm._frame_shape, tuple) def test_run_without_sub_sampling(self): + """ + Tests whether running yields the expectde behavior, given that the + user parameters are all correct. + TODO: implement this test, show functionality without sub-sampling + """ data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", shape_out=(3000,)) @@ -115,33 +144,27 @@ def test_run_without_sub_sampling(self): data_loader.stop() - # TODO: add asserts on the output to show functionality without subsampling - def test_sub_sampling(self): - # TODO: reduce size of this (less timesteps?) + """ + Tests whether we get the expected behavior when we set a max_num_events + that is smaller than the amount of events we receive in a given batch + (i.e. the process will sub-sample correctly). 
+ """ data_history = [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - # [1, 1, 1, 1, 1, 1, 1, 1, 1], - # [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], - # [1, 1, 1, 1, 1, 1], - # [0], - # [1, 1, 1], - # [1], - # [1], - # [1] + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [0] ] indices_history = [ [1597, 2308, 2486, 2496, 2498, 1787, 2642, 2633, 2489, 2488, 1596, 1729, 1727, 2500, 1780], - # [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], - # [7138, 2301, 2471, 1601, 2982, 1364, 1379, 1386, 1384, - # 2983, 1390, 2289, 1401, 1362, 2293], - # [1910, 1382, 1909, 1562, 1606, 1381], - # [464], - # [2323, 1908, 1393], - # [4062], - # [1792], - # [3889] + [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], + [7138, 2301, 2471, 1601, 2982, 1364, 1379, 1386, 1384, + 2983, 1390, 2289, 1401, 1362, 2293], + [1910, 1382, 1909, 1562, 1606, 1381], + [464] ] seed_rng = 0 rng = np.random.default_rng(seed=seed_rng) @@ -155,42 +178,53 @@ def test_sub_sampling(self): data_loader.out_port.connect(recv_sparse.in_port) # Run parameters - num_steps = 1 + num_steps = 5 run_cfg = Loihi1SimCfg() run_cnd = RunSteps(num_steps=1) # Running - data_loader.run(condition=run_cnd, run_cfg=run_cfg) + for i in range(num_steps): + data_loader.run(condition=run_cnd, run_cfg=run_cfg) - expected_data = np.array(data_history[0]) - expected_indices = np.array(indices_history[0]) + expected_data = np.array(data_history[i]) + expected_indices = np.array(indices_history[i]) - sent_and_received_data = \ - recv_sparse.data.get()[:expected_data.shape[0]] - sent_and_received_indices = \ - recv_sparse.idx.get()[:expected_indices.shape[0]] + sent_and_received_data = \ + recv_sparse.data.get()[:expected_data.shape[0]] + sent_and_received_indices = \ + recv_sparse.idx.get()[:expected_indices.shape[0]] - if expected_data.shape[0] > max_num_events: - data_idx_array = np.arange(0, expected_data.shape[0]) - sampled_idx = rng.choice(data_idx_array, - max_num_events, - replace=False) - # TODO: assert that after subsampling, the number of events is the maximum. Could also hard code expected events + if expected_data.shape[0] > max_num_events: + data_idx_array = np.arange(0, expected_data.shape[0]) + sampled_idx = rng.choice(data_idx_array, + max_num_events, + replace=False) + # TODO: assert that after subsampling, the number of events is the maximum. Could also hard code expected events - expected_data = expected_data[sampled_idx] - expected_indices = expected_indices[sampled_idx] + expected_data = expected_data[sampled_idx] + expected_indices = expected_indices[sampled_idx] - np.testing.assert_equal(sent_and_received_data, - expected_data) - np.testing.assert_equal(sent_and_received_indices, - expected_indices) + np.testing.assert_equal(sent_and_received_data, + expected_data) + np.testing.assert_equal(sent_and_received_indices, + expected_indices) # Stopping data_loader.stop() -# TODO: add another test that runs the process twice with different seeds and asserts that the events have been sampled differently + def test_sub_sampling_seed(self): + """ + Tests whether using different seeds does indeed result in different samples. + """ + # TODO: implement this + pass def test_end_of_file(self): + """ + Tests whether we loop back to the beginning of the event stream when we reach + the end of the aedat4 file. + TODO: implement this. dvs_recording.aedat4 should be 30 timesteps long. 
+ """ data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", shape_out=(3000,), seed_sub_sampling=0) @@ -209,6 +243,10 @@ def test_end_of_file(self): self.assertFalse(data_loader.runtime._is_running) def test_index_encoding(self): + """ + Tests whether indices are correctly calculated given x and y coordinates. + TODO: have less timesteps? maybe 2-3 (show it works for multiple timesteps with multiple sizes)? + """ x_history = [ [8, 12, 13, 13, 13, 9, 14, 14, 13, 13, 8, 9, 9, 13, 9], [8, 9, 12, 7, 12, 12, 20, 19, 10], diff --git a/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py b/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py index fe5744438..104993dc6 100644 --- a/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py +++ b/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py @@ -82,7 +82,7 @@ def test_negative_width_or_height_throws_exception(self): with(self.assertRaises(ValueError)): Flattening(shape_in=(240, -180)) - def test_too_few_or_too_many_dimensions_throws_exception(self): + def test_invalid_shape_throws_exception(self): """Tests whether an exception is thrown when a 1d or 4d value for the shape_in argument is given.""" with(self.assertRaises(ValueError)): Flattening(shape_in=(240,)) diff --git a/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py b/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py index 056ab7a16..56650d184 100644 --- a/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py +++ b/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py @@ -89,7 +89,7 @@ def test_init_3d(self): self.assertEqual(sparse_to_dense.proc_params["shape_in"], (43200,)) self.assertEqual(sparse_to_dense.proc_params["shape_out"], (240, 180, 2)) - def test_too_few_or_too_many_dimensions_shape_out_throws_exception(self): + def test_invalid_shape_out_throws_exception(self): """Tests whether an exception is thrown when a 1d or 4d value for the shape_out argument is given.""" # TODO: should the 4D+ case rather raise a NotImplementedError? 
with(self.assertRaises(ValueError)): @@ -100,8 +100,8 @@ def test_too_few_or_too_many_dimensions_shape_out_throws_exception(self): SparseToDense(shape_in=(43200,), shape_out=(240, 180, 2, 1)) - def test_too_many_dimensions_shape_in_throws_exception(self): - """Tests whether a shape_in argument with too many dimensions throws an exception.""" + def test_invalid_shape_in_throws_exception(self): + """Tests whether a shape_in argument that isn't (n,) throws an exception.""" with(self.assertRaises(ValueError)): SparseToDense(shape_in=(43200, 1), shape_out=(240, 180)) @@ -113,7 +113,7 @@ def test_third_dimension_not_2_throws_exception(self): SparseToDense(shape_in=(43200,), shape_out=(240, 180, 1)) - def test_negative_width_shape_in_throws_exception(self): + def test_negative_size_shape_in_throws_exception(self): """Tests whether an exception is thrown when a negative integer for the shape_in argument is given""" with(self.assertRaises(ValueError)): diff --git a/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py b/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py index 16d7ee112..29c672f60 100644 --- a/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py +++ b/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py @@ -86,13 +86,13 @@ def test_init(self): self.assertIsInstance(converter, BinaryToUnaryPolarity) self.assertEqual(converter.proc_params["shape"], (43200,)) - def test_too_many_dimensions_throws_exception(self): - """Tests whether a shape argument with too many dimensions throws an exception.""" + def test_invalid_shape_throws_exception(self): + """Tests whether a shape argument with an invalid shape throws an exception.""" with(self.assertRaises(ValueError)): BinaryToUnaryPolarity(shape=(240, 180)) - def test_negative_width_throws_exception(self): - """Tests whether a shape argument with a negative width throws an exception.""" + def test_negative_size_throws_exception(self): + """Tests whether a shape argument with a negative size throws an exception.""" with(self.assertRaises(ValueError)): BinaryToUnaryPolarity(shape=(-43200,)) From ee815335d76ece52c1698cc88c1cf566f2ac5084 Mon Sep 17 00:00:00 2001 From: gkarray Date: Wed, 7 Dec 2022 11:15:59 +0100 Subject: [PATCH 10/32] restructuring Processes --- src/lava/proc/down_sampling/models.py | 47 +++++ src/lava/proc/down_sampling/process.py | 61 ++++++ .../models.py} | 47 ++--- .../event_data/binary_to_unary/process.py | 47 +++++ .../dense_to_dense/down_sampling_dense.py | 190 ------------------ .../sparse_to_dense/sparse_to_dense.py | 105 ---------- .../event_data/event_pre_processor/utils.py | 24 --- .../aedat_data_loader.py | 3 +- src/lava/proc/event_data/to_frame/models.py | 61 ++++++ src/lava/proc/event_data/to_frame/process.py | 48 +++++ .../flattening.py | 0 src/lava/proc/max_pooling/models.py | 55 +++++ src/lava/proc/max_pooling/process.py | 69 +++++++ 13 files changed, 409 insertions(+), 348 deletions(-) create mode 100644 src/lava/proc/down_sampling/models.py create mode 100644 src/lava/proc/down_sampling/process.py rename src/lava/proc/event_data/{event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py => binary_to_unary/models.py} (54%) create mode 100644 src/lava/proc/event_data/binary_to_unary/process.py delete mode 100644 src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py delete mode 100644 
src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py delete mode 100644 src/lava/proc/event_data/event_pre_processor/utils.py rename src/lava/proc/event_data/{event_data_loader => io}/aedat_data_loader.py (98%) create mode 100644 src/lava/proc/event_data/to_frame/models.py create mode 100644 src/lava/proc/event_data/to_frame/process.py rename src/lava/proc/{event_data/event_pre_processor/dense_to_dense => event_temp}/flattening.py (100%) create mode 100644 src/lava/proc/max_pooling/models.py create mode 100644 src/lava/proc/max_pooling/process.py diff --git a/src/lava/proc/down_sampling/models.py b/src/lava/proc/down_sampling/models.py new file mode 100644 index 000000000..12031d571 --- /dev/null +++ b/src/lava/proc/down_sampling/models.py @@ -0,0 +1,47 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np +from numpy.lib.stride_tricks import as_strided + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.proc.down_sampling.process import DownSampling +from lava.proc.conv import utils + + +@implements(proc=DownSampling, protocol=LoihiProtocol) +@requires(CPU) +class DownSamplingPM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + + stride: np.ndarray = LavaPyType(np.ndarray, np.int8, precision=8) + padding: np.ndarray = LavaPyType(np.ndarray, np.int8, precision=8) + + def run_spk(self) -> None: + data = self.in_port.recv() + + down_sampled_data = self._down_sample(data) + + self.out_port.send(down_sampled_data) + + def _down_sample(self, data: np.ndarray) -> np.ndarray: + output_shape = self.out_port.shape + + padded_data = np.pad(data, + (utils.make_tuple(self.padding[0]), + utils.make_tuple(self.padding[1])), + mode='constant') + + strides_w = (self.stride[0] * data.strides[0], + self.stride[1] * data.strides[1]) + + down_sampled_data = as_strided(padded_data, output_shape, strides_w) + + return down_sampled_data diff --git a/src/lava/proc/down_sampling/process.py b/src/lava/proc/down_sampling/process.py new file mode 100644 index 000000000..3b246c112 --- /dev/null +++ b/src/lava/proc/down_sampling/process.py @@ -0,0 +1,61 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import typing as ty + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var +from lava.proc.conv import utils + + +class DownSampling(AbstractProcess): + def __init__( + self, + shape_in: ty.Tuple[int], + stride: ty.Union[int, ty.Tuple[int, int]], + padding: ty.Optional[ty.Union[int, ty.Tuple[int, int]]] = (0, 0), + **kwargs) -> None: + super().__init__(shape_in=shape_in, + stride=stride, + padding=padding, + **kwargs) + + self._validate_shape_in(shape_in) + + in_channels = shape_in[-1] + out_channels = in_channels + + padding = utils.make_tuple(padding) + stride = utils.make_tuple(stride) + + shape_out = utils.output_shape( + shape_in, out_channels, (1, 1), stride, padding, (1, 1) + ) + + self.in_port = 
InPort(shape=shape_in) + self.out_port = OutPort(shape=shape_out) + self.padding = Var(shape=(2,), init=padding) + self.stride = Var(shape=(2,), init=stride) + + @staticmethod + def _validate_shape_in(shape_in): + if not (len(shape_in) == 2 or len(shape_in) == 3): + raise ValueError(f"shape_in should be 2 or 3 dimensional. " + f"{shape_in} given.") + + if not isinstance(shape_in[0], int) or not isinstance(shape_in[1], int): + raise ValueError(f"Width and height of shape_in should be integers." + f"{shape_in} given.") + if len(shape_in) == 3: + if shape_in[2] != 2: + raise ValueError(f"Third dimension of shape_in should be " + f"equal to 2. " + f"{shape_in} given.") + + if shape_in[0] <= 0 or shape_in[1] <= 0: + raise ValueError(f"Width and height of shape_in should be positive." + f"{shape_in} given.") + + return shape_in diff --git a/src/lava/proc/event_data/event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py b/src/lava/proc/event_data/binary_to_unary/models.py similarity index 54% rename from src/lava/proc/event_data/event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py rename to src/lava/proc/event_data/binary_to_unary/models.py index 4536578b9..29aa87707 100644 --- a/src/lava/proc/event_data/event_pre_processor/sparse_to_sparse/binary_to_unary_polarity.py +++ b/src/lava/proc/event_data/binary_to_unary/models.py @@ -3,44 +3,26 @@ # See: https://spdx.org/licenses/ import numpy as np -import typing as ty -from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel - - -class BinaryToUnaryPolarity(AbstractProcess): - def __init__(self, - *, - shape: ty.Tuple[int], - **kwargs) -> None: - super().__init__(shape=shape, - **kwargs) - - self._validate_shape(shape) - - self.in_port = InPort(shape=shape) - self.out_port = OutPort(shape=shape) - - @staticmethod - def _validate_shape(shape: ty.Tuple[int]) -> None: - if len(shape) != 1: - raise ValueError(f"Shape should be (n,). {shape} was given.") - - if shape[0] <= 0: - raise ValueError(f"Max number of events should be positive. " - f"{shape} was given.") +from lava.proc.event_data.binary_to_unary.process import BinaryToUnaryPolarity @implements(proc=BinaryToUnaryPolarity, protocol=LoihiProtocol) @requires(CPU) class BinaryToUnaryPolarityPM(PyLoihiProcessModel): + """PyLoihiProcessModel implementing the BinaryToUnaryPolarity Process. + + Transforms event-based data with binary polarity (0 for negative events, + 1 for positive events) coming from its in_port to unary polarity + (1 for negative and positive events) and sends it through its out_port. + """ + in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) @@ -53,7 +35,18 @@ def run_spk(self) -> None: @staticmethod def _encode(data: np.ndarray) -> np.ndarray: + """Transform event-based data with binary polarity to unary polarity. + + Parameters + ---------- + data : ndarray + Event-based data with binary polarity. + + Returns + ---------- + result : ndarray + Event-based data with unary polarity. 
+ """ data[data == 0] = 1 return data - diff --git a/src/lava/proc/event_data/binary_to_unary/process.py b/src/lava/proc/event_data/binary_to_unary/process.py new file mode 100644 index 000000000..89e72eb6f --- /dev/null +++ b/src/lava/proc/event_data/binary_to_unary/process.py @@ -0,0 +1,47 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import typing as ty + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort + + +class BinaryToUnaryPolarity(AbstractProcess): + """Process that transforms event-based data with binary + polarity (0 for negative events, 1 for positive events) to unary + polarity (1 for negative and positive events). + + Parameters + ---------- + shape : tuple + Shape of InPort and OutPort. + """ + def __init__(self, + *, + shape: ty.Tuple[int], + **kwargs) -> None: + super().__init__(shape=shape, + **kwargs) + + self._validate_shape(shape) + + self.in_port = InPort(shape=shape) + self.out_port = OutPort(shape=shape) + + @staticmethod + def _validate_shape(shape: ty.Tuple[int]) -> None: + """Validate that a given shape is of the right format (max_num_events, ) + + Parameters + ---------- + shape : tuple + Shape to validate. + """ + if len(shape) != 1: + raise ValueError(f"Shape should be (n,). {shape} was given.") + + if shape[0] <= 0: + raise ValueError(f"Max number of events should be positive. " + f"{shape} was given.") diff --git a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py b/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py deleted file mode 100644 index 978594028..000000000 --- a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/down_sampling_dense.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ - -import numpy as np -from numpy.lib.stride_tricks import as_strided -from scipy import signal -import typing as ty - -from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.resources import CPU -from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.py.model import PyLoihiProcessModel -from lava.proc.event_data.event_pre_processor.utils import \ - DownSamplingMethodDense - -class DownSamplingDense(AbstractProcess): - def __init__(self, - shape_in: ty.Tuple[int], - down_sampling_method: DownSamplingMethodDense, - down_sampling_factor: int, - **kwargs) -> None: - super().__init__(shape_in=shape_in, - down_sampling_method=down_sampling_method, - down_sampling_factor=down_sampling_factor, - **kwargs) - - self._validate_shape_in(shape_in) - self._validate_down_sampling_method(down_sampling_method) - self._validate_down_sampling_factor(down_sampling_factor) - # test invalid shape in (negative/decimal values, 1d, 4+d, 3rd dim not 2) - # test for invalid down sampling factor (negative values) - # test for invalid type given to down sampling method - - shape_out = (shape_in[0] // down_sampling_factor, - shape_in[1] // down_sampling_factor) - self.in_port = InPort(shape=shape_in) - self.out_port = OutPort(shape=shape_out) - - @staticmethod - def 
_validate_shape_in(shape_in): - if not (len(shape_in) == 2 or len(shape_in) == 3): - raise ValueError(f"shape_in should be 2 or 3 dimensional. " - f"{shape_in} given.") - - if not isinstance(shape_in[0], int) or not isinstance(shape_in[1], int): - raise ValueError(f"Width and height of shape_in should be integers." - f"{shape_in} given.") - if len(shape_in) == 3: - if shape_in[2] != 2: - raise ValueError(f"Third dimension of shape_in should be " - f"equal to 2. " - f"{shape_in} given.") - - if shape_in[0] <= 0 or shape_in[1] <= 0: - raise ValueError(f"Width and height of shape_in should be positive." - f"{shape_in} given.") - - return shape_in - - @staticmethod - def _validate_down_sampling_method(down_sampling_method): - if not isinstance(down_sampling_method, DownSamplingMethodDense): - raise (TypeError( - f"Down sampling methods for dense to dense down-sampling need to be " - f"selected using the DownSamplingMethodDense Enum.")) - # TODO: mention that it's an enum in error message? - - @staticmethod - def _validate_down_sampling_factor(down_sampling_factor): - # TODO: should the down sampling factor be a float or an int? - if not isinstance(down_sampling_factor, int): - raise (ValueError(f"Down sampling factor should be an integer." - f"{down_sampling_factor} given.")) - - if down_sampling_factor <= 0: - raise ValueError(f"Down sampling factor should be positive." - f"{down_sampling_factor} given.") - - -@implements(proc=DownSamplingDense, protocol=LoihiProtocol) -@requires(CPU) -class DownSamplingDensePM(PyLoihiProcessModel): - in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) - out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) - - def __init__(self, proc_params: dict) -> None: - super().__init__(proc_params) - self._shape_in = proc_params["shape_in"] - self._down_sampling_method = proc_params["down_sampling_method"] - self._down_sampling_factor = proc_params["down_sampling_factor"] - - self._shape_out = (self._shape_in[0] // self._down_sampling_factor, - self._shape_in[1] // self._down_sampling_factor) - - def run_spk(self) -> None: - data = self.in_port.recv() - - down_sampled_data = self._down_sample(data) - - self.out_port.send(down_sampled_data) - - def _down_sample(self, data: np.ndarray) -> np.ndarray: - if self._down_sampling_method == DownSamplingMethodDense.SKIPPING: - down_sampled_data = \ - self._down_sample_skipping(data, - self._down_sampling_factor, - self._shape_out[0], - self._shape_out[1]) - - elif self._down_sampling_method == DownSamplingMethodDense.MAX_POOLING: - down_sampled_data = \ - self._down_sample_max_pooling(data, - self._down_sampling_factor, - self._shape_out[0], - self._shape_out[1]) - - elif self._down_sampling_method == DownSamplingMethodDense.CONVOLUTION: - down_sampled_data = \ - self._down_sample_convolution(data, - self._down_sampling_factor, - self._shape_out[0], - self._shape_out[1]) - - else: - # TODO : Remove since validation is taking care of this ? - raise ValueError(f"Unknown down_sample_mode." 
- f"{self._down_sampling_method=} given.") - - return down_sampled_data - - @staticmethod - def _down_sample_skipping(data: np.ndarray, - down_sampling_factor: int, - down_sampled_width: int, - down_sampled_height: int) -> np.ndarray: - down_sampled_data = \ - data[::down_sampling_factor, ::down_sampling_factor] - - down_sampled_data = \ - down_sampled_data[:down_sampled_width, :down_sampled_height] - - return down_sampled_data - - @staticmethod - def _down_sample_max_pooling(data: np.ndarray, - down_sampling_factor: int, - down_sampled_width: int, - down_sampled_height: int) -> np.ndarray: - output_shape = \ - ((data.shape[0] - down_sampling_factor) // down_sampling_factor + 1, - (data.shape[1] - down_sampling_factor) // down_sampling_factor + 1) - - shape_w = (output_shape[0], - output_shape[1], - down_sampling_factor, - down_sampling_factor) - strides_w = (down_sampling_factor * data.strides[0], - down_sampling_factor * data.strides[1], - data.strides[0], - data.strides[1]) - - down_sampled_data = as_strided(data, shape_w, strides_w) - down_sampled_data = down_sampled_data.max(axis=(2, 3)) - - # TODO: Is this really needed ? - down_sampled_data = \ - down_sampled_data[:down_sampled_width, :down_sampled_height] - - return down_sampled_data - - @staticmethod - def _down_sample_convolution(data: np.ndarray, - down_sampling_factor: int, - down_sampled_width: int, - down_sampled_height: int) -> np.ndarray: - kernel = np.ones((down_sampling_factor, down_sampling_factor)) - data_convolved = signal.convolve2d(data, kernel) - - down_sampled_data = \ - data_convolved[::down_sampling_factor, ::down_sampling_factor] - - down_sampled_data = \ - down_sampled_data[:down_sampled_width, :down_sampled_height] - - return down_sampled_data diff --git a/src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py b/src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py deleted file mode 100644 index c6dee060b..000000000 --- a/src/lava/proc/event_data/event_pre_processor/sparse_to_dense/sparse_to_dense.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ - -import numpy as np -import typing as ty - -from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.resources import CPU -from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.py.model import PyLoihiProcessModel - - -class SparseToDense(AbstractProcess): - def __init__(self, - *, - shape_in: ty.Tuple[int], - shape_out: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]], - **kwargs) -> None: - super().__init__(shape_in=shape_in, - shape_out=shape_out, - **kwargs) - - self._validate_shape_in(shape_in) - self._validate_shape_out(shape_out) - - self.in_port = InPort(shape=shape_in) - self.out_port = OutPort(shape=shape_out) - - @staticmethod - def _validate_shape_in(shape_in: ty.Tuple[int]) -> None: - if len(shape_in) != 1: - raise ValueError(f"Shape of the InPort should be (n,). " - f"{shape_in} was given.") - - if shape_in[0] <= 0: - raise ValueError(f"Width of shape_in should be positive. 
{shape_in} given.") - - @staticmethod - def _validate_shape_out(shape_out: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]]) -> None: - if not (len(shape_out) == 2 or len(shape_out) == 3): - raise ValueError(f"shape_out should be 2 or 3 dimensional. {shape_out} given.") - - if len(shape_out) == 3: - if shape_out[2] != 2: - raise ValueError(f"Depth of the shape_out argument should be an integer and equal to 2. " - f"{shape_out} given.") - - if shape_out[0] <= 0 or shape_out[1] <= 0: - raise ValueError(f"Width and height of the shape_out argument should be positive. " - f"{shape_out} given.") - - -@implements(proc=SparseToDense, protocol=LoihiProtocol) -@requires(CPU) -class SparseToDensePM(PyLoihiProcessModel): - in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) - out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) - - def __init__(self, proc_params: dict) -> None: - super().__init__(proc_params) - self._shape_out = proc_params["shape_out"] - - def run_spk(self) -> None: - data, indices = self.in_port.recv() - - dense_data = self._transform(data, indices) - - self.out_port.send(dense_data) - - def _transform(self, data: np.ndarray, indices: np.ndarray) -> np.ndarray: - if len(self._shape_out) == 2: - return self._transform_2d(data, indices) - elif len(self._shape_out) == 3: - return self._transform_3d(data, indices) - # TODO : Should we add an else here ? - # TODO : We will never reach it if correctly validated - - def _transform_2d(self, - data: np.ndarray, - indices: np.ndarray) -> np.ndarray: - dense_data = np.zeros(self._shape_out) - - xs, ys = np.unravel_index(indices, self._shape_out) - - dense_data[xs[data == 0], ys[data == 0]] = 1 - dense_data[xs[data == 1], ys[data == 1]] = 1 - - return dense_data - - def _transform_3d(self, - data: np.ndarray, - indices: np.ndarray) -> np.ndarray: - dense_data = np.zeros(self._shape_out) - - xs, ys = np.unravel_index(indices, self._shape_out[:-1]) - - dense_data[xs[data == 0], ys[data == 0], 0] = 1 - dense_data[xs[data == 1], ys[data == 1], 1] = 1 - - return dense_data diff --git a/src/lava/proc/event_data/event_pre_processor/utils.py b/src/lava/proc/event_data/event_pre_processor/utils.py deleted file mode 100644 index 99c28e8f4..000000000 --- a/src/lava/proc/event_data/event_pre_processor/utils.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ - -from enum import IntEnum - - -class DownSamplingMethodSparse(IntEnum): - SKIPPING = 0 - MAX_POOLING = 1 - - -class UpSamplingMethodSparse(IntEnum): - REPEAT = 0 - - -class DownSamplingMethodDense(IntEnum): - SKIPPING = 0 - MAX_POOLING = 1 - CONVOLUTION = 2 - - -class UpSamplingMethodDense(IntEnum): - REPEAT = 0 diff --git a/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py b/src/lava/proc/event_data/io/aedat_data_loader.py similarity index 98% rename from src/lava/proc/event_data/event_data_loader/aedat_data_loader.py rename to src/lava/proc/event_data/io/aedat_data_loader.py index 9fc4d56a8..35b8eb0b8 100644 --- a/src/lava/proc/event_data/event_data_loader/aedat_data_loader.py +++ b/src/lava/proc/event_data/io/aedat_data_loader.py @@ -20,8 +20,7 @@ class AedatDataLoader(AbstractProcess): - """ - Process that reads data from an aedat4 file. + """Process that reads event-based data from an aedat4 file. This process outputs a sparse tensor of the event data stream, meaning two 1-dimensional vectors containing polarity data and indices. 
The diff --git a/src/lava/proc/event_data/to_frame/models.py b/src/lava/proc/event_data/to_frame/models.py new file mode 100644 index 000000000..6e8faac00 --- /dev/null +++ b/src/lava/proc/event_data/to_frame/models.py @@ -0,0 +1,61 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.proc.event_data.to_frame.process import ToFrame + + +@implements(proc=ToFrame, protocol=LoihiProtocol) +@requires(CPU) +class ToFramePM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + + def __init__(self, proc_params: dict) -> None: + super().__init__(proc_params) + self._shape_out = proc_params["shape_out"] + + def run_spk(self) -> None: + data, indices = self.in_port.recv() + + dense_data = self._transform(data, indices) + + self.out_port.send(dense_data) + + def _transform(self, data: np.ndarray, indices: np.ndarray) -> np.ndarray: + if len(self._shape_out) == 2: + return self._transform_2d(data, indices) + elif len(self._shape_out) == 3: + return self._transform_3d(data, indices) + + def _transform_2d(self, + data: np.ndarray, + indices: np.ndarray) -> np.ndarray: + dense_data = np.zeros(self._shape_out) + + xs, ys = np.unravel_index(indices, self._shape_out) + + dense_data[xs[data == 0], ys[data == 0]] = 1 + dense_data[xs[data == 1], ys[data == 1]] = 1 + + return dense_data + + def _transform_3d(self, + data: np.ndarray, + indices: np.ndarray) -> np.ndarray: + dense_data = np.zeros(self._shape_out) + + xs, ys = np.unravel_index(indices, self._shape_out[:-1]) + + dense_data[xs[data == 0], ys[data == 0], 0] = 1 + dense_data[xs[data == 1], ys[data == 1], 1] = 1 + + return dense_data diff --git a/src/lava/proc/event_data/to_frame/process.py b/src/lava/proc/event_data/to_frame/process.py new file mode 100644 index 000000000..22438020d --- /dev/null +++ b/src/lava/proc/event_data/to_frame/process.py @@ -0,0 +1,48 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import typing as ty + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort + + +class ToFrame(AbstractProcess): + def __init__(self, + *, + shape_in: ty.Tuple[int], + shape_out: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]], + **kwargs) -> None: + super().__init__(shape_in=shape_in, + shape_out=shape_out, + **kwargs) + + self._validate_shape_in(shape_in) + self._validate_shape_out(shape_out) + + self.in_port = InPort(shape=shape_in) + self.out_port = OutPort(shape=shape_out) + + @staticmethod + def _validate_shape_in(shape_in: ty.Tuple[int]) -> None: + if len(shape_in) != 1: + raise ValueError(f"Shape of the InPort should be (n,). " + f"{shape_in} was given.") + + if shape_in[0] <= 0: + raise ValueError(f"Width of shape_in should be positive. 
{shape_in} given.") + + @staticmethod + def _validate_shape_out(shape_out: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]]) -> None: + if not (len(shape_out) == 2 or len(shape_out) == 3): + raise ValueError(f"shape_out should be 2 or 3 dimensional. {shape_out} given.") + + if len(shape_out) == 3: + if shape_out[2] != 2: + raise ValueError(f"Depth of the shape_out argument should be an integer and equal to 2. " + f"{shape_out} given.") + + if shape_out[0] <= 0 or shape_out[1] <= 0: + raise ValueError(f"Width and height of the shape_out argument should be positive. " + f"{shape_out} given.") diff --git a/src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py b/src/lava/proc/event_temp/flattening.py similarity index 100% rename from src/lava/proc/event_data/event_pre_processor/dense_to_dense/flattening.py rename to src/lava/proc/event_temp/flattening.py diff --git a/src/lava/proc/max_pooling/models.py b/src/lava/proc/max_pooling/models.py new file mode 100644 index 000000000..5f0cba467 --- /dev/null +++ b/src/lava/proc/max_pooling/models.py @@ -0,0 +1,55 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np +from numpy.lib.stride_tricks import as_strided + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.proc.max_pooling.process import MaxPooling +from lava.proc.conv import utils + + +@implements(proc=MaxPooling, protocol=LoihiProtocol) +@requires(CPU) +class MaxPoolingPM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + + kernel_size: np.ndarray = LavaPyType(np.ndarray, np.int8, precision=8) + stride: np.ndarray = LavaPyType(np.ndarray, np.int8, precision=8) + padding: np.ndarray = LavaPyType(np.ndarray, np.int8, precision=8) + + def run_spk(self) -> None: + data = self.in_port.recv() + + max_pooled_data = self._max_pooling(data) + + self.out_port.send(max_pooled_data) + + def _max_pooling(self, data: np.ndarray) -> np.ndarray: + output_shape = self.out_port.shape + + padded_data = np.pad(data, + (utils.make_tuple(self.padding[0]), + utils.make_tuple(self.padding[1])), + mode='constant') + + shape_w = (output_shape[0], + output_shape[1], + self.kernel_size[0], + self.kernel_size[1]) + strides_w = (self.stride[0] * data.strides[0], + self.stride[1] * data.strides[1], + data.strides[0], + data.strides[1]) + + pooled_data = as_strided(padded_data, shape_w, strides_w) + max_pooled_data = pooled_data.max(axis=(2, 3)) + + return max_pooled_data diff --git a/src/lava/proc/max_pooling/process.py b/src/lava/proc/max_pooling/process.py new file mode 100644 index 000000000..30c8e8133 --- /dev/null +++ b/src/lava/proc/max_pooling/process.py @@ -0,0 +1,69 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import typing as ty + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var +from lava.proc.conv import utils + + +class MaxPooling(AbstractProcess): + def __init__( + self, + *, + shape_in: 
ty.Tuple[int, int, int], + kernel_size: ty.Union[int, ty.Tuple[int, int]], + stride: ty.Optional[ty.Union[int, ty.Tuple[int, int]]] = None, + padding: ty.Optional[ty.Union[int, ty.Tuple[int, int]]] = (0, 0), + **kwargs) -> None: + super().__init__(shape_in=shape_in, + kernel_size=kernel_size, + stride=stride, + padding=padding, + **kwargs) + + self._validate_shape_in(shape_in) + + if stride is None: + stride = kernel_size + + in_channels = shape_in[-1] + out_channels = in_channels + + kernel_size = utils.make_tuple(kernel_size) + padding = utils.make_tuple(padding) + stride = utils.make_tuple(stride) + + shape_out = utils.output_shape( + shape_in, out_channels, kernel_size, stride, padding, (1, 1) + ) + + self.in_port = InPort(shape=shape_in) + self.out_port = OutPort(shape=shape_out) + self.kernel_size = Var(shape=(2,), init=kernel_size) + self.padding = Var(shape=(2,), init=padding) + self.stride = Var(shape=(2,), init=stride) + + @staticmethod + def _validate_shape_in(shape_in): + if not (len(shape_in) == 2 or len(shape_in) == 3): + raise ValueError(f"shape_in should be 2 or 3 dimensional. " + f"{shape_in} given.") + + if not isinstance(shape_in[0], int) or not isinstance(shape_in[1], int): + raise ValueError(f"Width and height of shape_in should be integers." + f"{shape_in} given.") + if len(shape_in) == 3: + if shape_in[2] != 2: + raise ValueError(f"Third dimension of shape_in should be " + f"equal to 2. " + f"{shape_in} given.") + + if shape_in[0] <= 0 or shape_in[1] <= 0: + raise ValueError(f"Width and height of shape_in should be positive." + f"{shape_in} given.") + + return shape_in From 57c89296ef1ca7cf97c514982b211b1d6dbdd8de Mon Sep 17 00:00:00 2001 From: SveaMeyer13 Date: Wed, 7 Dec 2022 17:02:46 +0100 Subject: [PATCH 11/32] add beginning of input process --- .../event_data/event_data_loader/dv_stream.py | 84 ++++++++++++ .../event_data_loader/test_dv_stream.py | 124 ++++++++++++++++++ 2 files changed, 208 insertions(+) create mode 100644 src/lava/proc/event_data/event_data_loader/dv_stream.py create mode 100644 tests/lava/proc/event_data/event_data_loader/test_dv_stream.py diff --git a/src/lava/proc/event_data/event_data_loader/dv_stream.py b/src/lava/proc/event_data/event_data_loader/dv_stream.py new file mode 100644 index 000000000..7efc89f08 --- /dev/null +++ b/src/lava/proc/event_data/event_data_loader/dv_stream.py @@ -0,0 +1,84 @@ +import typing as ty + +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.magma.core.model.py.ports import PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.process.ports.ports import OutPort +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.resources import CPU +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from dv import NetworkNumpyEventPacketInput + +class DvStream(AbstractProcess): + """ + Parameters + ---------- + + """ + def __init__(self, + *, + address: str, + port: int, + shape_out: ty.Tuple[int]) -> None: + super().__init__(address=address, + port=port, + shape_out=shape_out) + self._validate_shape_out(shape_out) + self._validate_port(port) + self._validate_address(address) + self.out_port = OutPort(shape=shape_out) + + + @staticmethod + def _validate_shape_out(shape_out: ty.Tuple[int]) -> None: + """ + Checks whether the given shape is valid and that the size given + is not a negative number. 
Raises relevant exception if not. + """ + if len(shape_out) != 1: + raise ValueError(f"Shape of the OutPort should be (n,). " + f"{shape_out} was given.") + if shape_out[0] <= 0: + raise ValueError(f"Max number of events should be positive. " + f"{shape_out} was given.") + + @staticmethod + def _validate_port(port: int) -> None: + """ + Check whether the given port is valid. Raises relevant exception if not. + """ + + if not (0 <= port <= 65535): + raise ValueError(f"Port should be between 0 and 65535. " + f"{port} was given.") + + @staticmethod + def _validate_address(address: str) -> None: + """ + Check that address is not an empty string. Raises relevant exception if not. + """ + + if not address: + raise ValueError("Address should not be empty") + + +@implements(proc=DvStream, protocol=LoihiProtocol) +@requires(CPU) +class DvStreamPM(PyLoihiProcessModel): + """ + Implementation of the DvStream process on Loihi, with sparse + representation of events. + """ + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) + + def __init__(self, proc_params: dict) -> None: + super().__init__(proc_params) + self._address = proc_params["address"] + self._port = proc_params["port"] + self._shape_out = proc_params["shape_out"] + self._event_stream = proc_params.get("_event_stream") + if not self._event_stream: + self._event_stream = NetworkNumpyEventPacketInput(address=self._address, port=self._port) + + diff --git a/tests/lava/proc/event_data/event_data_loader/test_dv_stream.py b/tests/lava/proc/event_data/event_data_loader/test_dv_stream.py new file mode 100644 index 000000000..76c626500 --- /dev/null +++ b/tests/lava/proc/event_data/event_data_loader/test_dv_stream.py @@ -0,0 +1,124 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +from dv import AedatFile +from dv.AedatFile import _AedatFileEventNumpyPacketIterator +import numpy as np +import typing as ty +import unittest + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var +from lava.magma.core.resources import CPU +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.run_configs import Loihi1SimCfg +from lava.proc.event_data.event_data_loader.dv_stream import DvStream, DvStreamPM + + +class TestProcessDvStream(unittest.TestCase): + def test_init(self): + """ + Tests instantiation of DvStream. + """ + + stream = DvStream(address="127.0.0.1", + port=7777, + shape_out=(43200,)) + + self.assertIsInstance(stream, DvStream) + self.assertEqual((43200,), stream.out_port.shape) + + def test_invalid_shape_throws_exception(self): + """ + Tests whether a shape_out argument with an invalid shape throws an exception. + """ + with(self.assertRaises(ValueError)): + DvStream(address="127.0.0.1", + port=7777, + shape_out=(240, 180)) + + def test_negative_size_throws_exception(self): + """ + Tests whether a shape_out argument with a negative size throws an exception. 
+ """ + with(self.assertRaises(ValueError)): + DvStream(address="127.0.0.1", + port=7777, + shape_out=(-240,)) + + def test_negative_port_throws_exception(self): + """ + Tests whether a port argument with a negative size throws an exception. + """ + with(self.assertRaises(ValueError)): + DvStream(address="127.0.0.1", + port=-7777, + shape_out=(43200,)) + + def test_port_out_of_range_throws_exception(self): + """ + Tests whether a port argument that is out of range throws an error. + """ + with(self.assertRaises(ValueError)): + DvStream(address="127.0.0.1", + port=7777777, + shape_out=(43200,)) + + def test_address_empty_string_throws_exception(self): + with(self.assertRaises(ValueError)): + DvStream(address="", + port=7777, + shape_out=(43200,)) + +class TestProcessModelDvStream(unittest.TestCase): + def test_init(self): + """ + Tests instantiation of the DvStream process model. + """ + proc_params = { + "address": "127.0.0.1", + "port": 7777, + "shape_out": (43200,), + } + + pm = DvStreamPM(proc_params) + + self.assertIsInstance(pm, DvStreamPM) + + def test_run_spike(self): + class PacketInput(ty.Protocol): + def __next__(self): + ... + + class MockPacketInput: + def __next__(self): + return { + "x": 35, + "y": 35, + "polarity": 0, + } + + + proc_params = { + "address": "127.0.0.1", + "port": 7777, + "shape_out": (43200,), + "_event_stream": MockPacketInput() + + } + + pm = DvStreamPM(proc_params) + pm.run_spk() + + + + +if __name__ == '__main__': + unittest.main() From 1bd6346970d565495c62424e3408d391135626c1 Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Wed, 7 Dec 2022 17:26:09 +0100 Subject: [PATCH 12/32] refactored tests --- tests/lava/proc/down_sampling/__init__.py | 0 .../test_down_sampling_dense.py | 9 +- .../test_binary_to_unary_polarity.py | 0 .../test_flattening.py | 0 .../test_aedat_data_loader.py | 70 +++++++++++++-- .../test_to_frame.py} | 88 +++++++++---------- 6 files changed, 107 insertions(+), 60 deletions(-) create mode 100644 tests/lava/proc/down_sampling/__init__.py rename tests/lava/proc/{event_data/event_pre_processor/dense_to_dense => down_sampling}/test_down_sampling_dense.py (97%) rename tests/lava/proc/event_data/{event_pre_processor/sparse_to_sparse => binary_to_unary}/test_binary_to_unary_polarity.py (100%) rename tests/lava/proc/event_data/{event_pre_processor/dense_to_dense => event_temp}/test_flattening.py (100%) rename tests/lava/proc/event_data/{event_data_loader => io}/test_aedat_data_loader.py (82%) rename tests/lava/proc/event_data/{event_pre_processor/sparse_to_dense/test_sparse_to_dense.py => to_frame/test_to_frame.py} (73%) diff --git a/tests/lava/proc/down_sampling/__init__.py b/tests/lava/proc/down_sampling/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_down_sampling_dense.py b/tests/lava/proc/down_sampling/test_down_sampling_dense.py similarity index 97% rename from tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_down_sampling_dense.py rename to tests/lava/proc/down_sampling/test_down_sampling_dense.py index 090f477d0..861c7a5d8 100644 --- a/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_down_sampling_dense.py +++ b/tests/lava/proc/down_sampling/test_down_sampling_dense.py @@ -1,27 +1,22 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import unittest -from lava.proc.event_data.event_pre_processor.dense_to_dense.down_sampling_dense 
import DownSamplingDense, DownSamplingDensePM -from lava.proc.event_data.event_pre_processor.utils import DownSamplingMethodDense import numpy as np +import unittest from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import InPort, OutPort from lava.magma.core.process.variable import Var - from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel - from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg - -import matplotlib.pyplot as plt +from lava.proc.down_sampling.models import DownSampling, DownSamplingPM class RecvDense(AbstractProcess): diff --git a/tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py b/tests/lava/proc/event_data/binary_to_unary/test_binary_to_unary_polarity.py similarity index 100% rename from tests/lava/proc/event_data/event_pre_processor/sparse_to_sparse/test_binary_to_unary_polarity.py rename to tests/lava/proc/event_data/binary_to_unary/test_binary_to_unary_polarity.py diff --git a/tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py b/tests/lava/proc/event_data/event_temp/test_flattening.py similarity index 100% rename from tests/lava/proc/event_data/event_pre_processor/dense_to_dense/test_flattening.py rename to tests/lava/proc/event_data/event_temp/test_flattening.py diff --git a/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py b/tests/lava/proc/event_data/io/test_aedat_data_loader.py similarity index 82% rename from tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py rename to tests/lava/proc/event_data/io/test_aedat_data_loader.py index a74a12b98..8d39929b2 100644 --- a/tests/lava/proc/event_data/event_data_loader/test_aedat_data_loader.py +++ b/tests/lava/proc/event_data/io/test_aedat_data_loader.py @@ -19,8 +19,7 @@ from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg -from lava.proc.event_data.event_data_loader.aedat_data_loader import AedatDataLoader, \ - AedatDataLoaderPM +from lava.proc.event_data.io.aedat_data_loader import AedatDataLoader, AedatDataLoaderPM class RecvSparse(AbstractProcess): """ @@ -50,7 +49,8 @@ class PyRecvSparsePM(PyLoihiProcessModel): def run_spk(self) -> None: """ - Receives the data and pads with zeros to fit them to the port shape. TODO: why? + Receives the data and pads with zeros to be able to access it with + Lava Vars. """ data, idx = self.in_port.recv() @@ -133,15 +133,56 @@ def test_run_without_sub_sampling(self): user parameters are all correct. 
TODO: implement this test, show functionality without sub-sampling """ + data_history = [ + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1], + [0] + ] + indices_history = [ + [1597, 2308, 2486, 2496, 2498, 1787, 2642, 2633, 2489, + 2488, 1596, 1729, 1727, 2500, 1780], + [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], + [7138, 2301, 2471, 1601, 2982, 1364, 1379, 1386, 1384, + 2983, 1390, 2289, 1401, 1362, 2293], + [1910, 1382, 1909, 1562, 1606, 1381], + [464] + ] + seed_rng = 0 + rng = np.random.default_rng(seed=seed_rng) + + max_num_events = 15 data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(3000,)) + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng) + recv_sparse = RecvSparse(shape=(max_num_events,)) - num_steps = 9 + data_loader.out_port.connect(recv_sparse.in_port) + + # Run parameters + num_steps = 5 run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=num_steps) + run_cnd = RunSteps(num_steps=1) - data_loader.run(condition=run_cnd, run_cfg=run_cfg) + # Running + for i in range(num_steps): + data_loader.run(condition=run_cnd, run_cfg=run_cfg) + + expected_data = np.array(data_history[i]) + expected_indices = np.array(indices_history[i]) + + sent_and_received_data = \ + recv_sparse.data.get()[:expected_data.shape[0]] + sent_and_received_indices = \ + recv_sparse.idx.get()[:expected_indices.shape[0]] + np.testing.assert_equal(sent_and_received_data, + expected_data) + np.testing.assert_equal(sent_and_received_indices, + expected_indices) + + # Stopping data_loader.stop() def test_sub_sampling(self): @@ -169,6 +210,15 @@ def test_sub_sampling(self): seed_rng = 0 rng = np.random.default_rng(seed=seed_rng) + expected_data = [ + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + ] + + expected_indices = [ + [1787, 1780, 2498, 2633, 2486, 1597, 1727, 2496, 2500, 1729], + [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911,] + ] + max_num_events = 10 data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", shape_out=(max_num_events,), @@ -178,7 +228,7 @@ def test_sub_sampling(self): data_loader.out_port.connect(recv_sparse.in_port) # Run parameters - num_steps = 5 + num_steps = 2 run_cfg = Loihi1SimCfg() run_cnd = RunSteps(num_steps=1) @@ -199,7 +249,6 @@ def test_sub_sampling(self): sampled_idx = rng.choice(data_idx_array, max_num_events, replace=False) - # TODO: assert that after subsampling, the number of events is the maximum. 
Could also hard code expected events expected_data = expected_data[sampled_idx] expected_indices = expected_indices[sampled_idx] @@ -209,6 +258,9 @@ def test_sub_sampling(self): np.testing.assert_equal(sent_and_received_indices, expected_indices) + print(f"data timestep {i}: ", sent_and_received_data) + print(f"indices timestep {i}: ", sent_and_received_indices) + # Stopping data_loader.stop() diff --git a/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py b/tests/lava/proc/event_data/to_frame/test_to_frame.py similarity index 73% rename from tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py rename to tests/lava/proc/event_data/to_frame/test_to_frame.py index 56650d184..270198ccd 100644 --- a/tests/lava/proc/event_data/event_pre_processor/sparse_to_dense/test_sparse_to_dense.py +++ b/tests/lava/proc/event_data/to_frame/test_to_frame.py @@ -17,7 +17,7 @@ from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg -from lava.proc.event_data.event_pre_processor.sparse_to_dense.sparse_to_dense import SparseToDense, SparseToDensePM +from lava.proc.event_data.to_frame.models import ToFrame, ToFramePM # TODO: add doc strings class RecvDense(AbstractProcess): @@ -73,63 +73,63 @@ def run_spk(self) -> None: class TestProcessSparseToDense(unittest.TestCase): def test_init_2d(self): """Tests instantiation of SparseToDense for a 2D output.""" - sparse_to_dense = SparseToDense(shape_in=(43200,), - shape_out=(240, 180)) + to_frame = ToFrame(shape_in=(43200,), + shape_out=(240, 180)) - self.assertIsInstance(sparse_to_dense, SparseToDense) - self.assertEqual(sparse_to_dense.proc_params["shape_in"], (43200,)) - self.assertEqual(sparse_to_dense.proc_params["shape_out"], (240, 180)) + self.assertIsInstance(to_frame, ToFrame) + self.assertEqual(to_frame.proc_params["shape_in"], (43200,)) + self.assertEqual(to_frame.proc_params["shape_out"], (240, 180)) def test_init_3d(self): """Tests instantiation of SparseToDense for a 3D output.""" - sparse_to_dense = SparseToDense(shape_in=(43200,), - shape_out=(240, 180, 2)) + to_frame = ToFrame(shape_in=(43200,), + shape_out=(240, 180, 2)) - self.assertIsInstance(sparse_to_dense, SparseToDense) - self.assertEqual(sparse_to_dense.proc_params["shape_in"], (43200,)) - self.assertEqual(sparse_to_dense.proc_params["shape_out"], (240, 180, 2)) + self.assertIsInstance(to_frame, ToFrame) + self.assertEqual(to_frame.proc_params["shape_in"], (43200,)) + self.assertEqual(to_frame.proc_params["shape_out"], (240, 180, 2)) def test_invalid_shape_out_throws_exception(self): """Tests whether an exception is thrown when a 1d or 4d value for the shape_out argument is given.""" # TODO: should the 4D+ case rather raise a NotImplementedError? 
with(self.assertRaises(ValueError)): - SparseToDense(shape_in=(43200,), - shape_out=(240,)) + ToFrame(shape_in=(43200,), + shape_out=(240,)) with(self.assertRaises(ValueError)): - SparseToDense(shape_in=(43200,), - shape_out=(240, 180, 2, 1)) + ToFrame(shape_in=(43200,), + shape_out=(240, 180, 2, 1)) def test_invalid_shape_in_throws_exception(self): """Tests whether a shape_in argument that isn't (n,) throws an exception.""" with(self.assertRaises(ValueError)): - SparseToDense(shape_in=(43200, 1), - shape_out=(240, 180)) + ToFrame(shape_in=(43200, 1), + shape_out=(240, 180)) def test_third_dimension_not_2_throws_exception(self): """Tests whether an exception is thrown if the value of the 3rd dimension for the shape_out argument is not 2.""" with(self.assertRaises(ValueError)): - SparseToDense(shape_in=(43200,), - shape_out=(240, 180, 1)) + ToFrame(shape_in=(43200,), + shape_out=(240, 180, 1)) def test_negative_size_shape_in_throws_exception(self): """Tests whether an exception is thrown when a negative integer for the shape_in argument is given""" with(self.assertRaises(ValueError)): - SparseToDense(shape_in=(-43200,), - shape_out=(240, 180)) + ToFrame(shape_in=(-43200,), + shape_out=(240, 180)) def test_negative_width_or_height_shape_out_throws_exception(self): """Tests whether an exception is thrown when a negative width or height for the shape_out argument is given""" with(self.assertRaises(ValueError)): - SparseToDense(shape_in=(43200,), - shape_out=(-240, 180)) + ToFrame(shape_in=(43200,), + shape_out=(-240, 180)) with(self.assertRaises(ValueError)): - SparseToDense(shape_in=(43200,), - shape_out=(240, -180)) + ToFrame(shape_in=(43200,), + shape_out=(240, -180)) #TODO: add doc strings @@ -140,9 +140,9 @@ def test_init(self): "shape_out": (240, 180) } - pm = SparseToDensePM(proc_params) + pm = ToFramePM(proc_params) - self.assertIsInstance(pm, SparseToDensePM) + self.assertIsInstance(pm, ToFramePM) self.assertEqual(pm._shape_out, proc_params["shape_out"]) # TODO: can be deleted I guess @@ -153,10 +153,10 @@ def test_run(self): indices = np.ravel_multi_index((xs, ys), (8, 8)) send_sparse = SendSparse(shape=(10, ), data=data, indices=indices) - sparse_to_dense = SparseToDense(shape_in=(10, ), + to_frame = ToFrame(shape_in=(10, ), shape_out=(8, 8)) - send_sparse.out_port.connect(sparse_to_dense.in_port) + send_sparse.out_port.connect(to_frame.in_port) # Run parameters num_steps = 1 @@ -164,12 +164,12 @@ def test_run(self): run_cnd = RunSteps(num_steps=num_steps) # Running - sparse_to_dense.run(condition=run_cnd, run_cfg=run_cfg) + to_frame.run(condition=run_cnd, run_cfg=run_cfg) # Stopping - sparse_to_dense.stop() + to_frame.stop() - self.assertFalse(sparse_to_dense.runtime._is_running) + self.assertFalse(to_frame.runtime._is_running) def test_2d(self): data = np.array([1, 1, 1, 1, 1, 1]) @@ -188,12 +188,12 @@ def test_2d(self): expected_data[4, 4] = 1 send_sparse = SendSparse(shape=(10, ), data=data, indices=indices) - sparse_to_dense = SparseToDense(shape_in=(10, ), - shape_out=(8, 8)) + to_frame = ToFrame(shape_in=(10, ), + shape_out=(8, 8)) recv_dense = RecvDense(shape=(8, 8)) - send_sparse.out_port.connect(sparse_to_dense.in_port) - sparse_to_dense.out_port.connect(recv_dense.in_port) + send_sparse.out_port.connect(to_frame.in_port) + to_frame.out_port.connect(recv_dense.in_port) # Run parameters num_steps = 1 @@ -201,13 +201,13 @@ def test_2d(self): run_cnd = RunSteps(num_steps=num_steps) # Running - sparse_to_dense.run(condition=run_cnd, run_cfg=run_cfg) + 
to_frame.run(condition=run_cnd, run_cfg=run_cfg) sent_and_received_data = \ recv_dense.data.get() # Stopping - sparse_to_dense.stop() + to_frame.stop() np.testing.assert_equal(sent_and_received_data, expected_data) @@ -229,12 +229,12 @@ def test_3d(self): expected_data[4, 4, 0] = 1 send_sparse = SendSparse(shape=(10,), data=data, indices=indices) - sparse_to_dense = SparseToDense(shape_in=(10,), - shape_out=(8, 8, 2)) + to_frame = ToFrame(shape_in=(10,), + shape_out=(8, 8, 2)) recv_dense = RecvDense(shape=(8, 8, 2)) - send_sparse.out_port.connect(sparse_to_dense.in_port) - sparse_to_dense.out_port.connect(recv_dense.in_port) + send_sparse.out_port.connect(to_frame.in_port) + to_frame.out_port.connect(recv_dense.in_port) # Run parameters num_steps = 1 @@ -242,13 +242,13 @@ def test_3d(self): run_cnd = RunSteps(num_steps=num_steps) # Running - sparse_to_dense.run(condition=run_cnd, run_cfg=run_cfg) + to_frame.run(condition=run_cnd, run_cfg=run_cfg) sent_and_received_data = \ recv_dense.data.get() # Stopping - sparse_to_dense.stop() + to_frame.stop() # # TODO : REMOVE THIS AFTER DEBUG # expected_data_im = np.zeros((8, 8)) From be276bd2788f75e34ba231373291e283b09591fa Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Wed, 7 Dec 2022 17:30:48 +0100 Subject: [PATCH 13/32] WIP --- .../proc/event_data/io/aedat_data_loader.py | 4 +++- src/lava/proc/event_temp/flattening.py | 1 - src/lava/utils/events.py | 23 ++++++++----------- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/src/lava/proc/event_data/io/aedat_data_loader.py b/src/lava/proc/event_data/io/aedat_data_loader.py index 35b8eb0b8..cf2ff41a4 100644 --- a/src/lava/proc/event_data/io/aedat_data_loader.py +++ b/src/lava/proc/event_data/io/aedat_data_loader.py @@ -117,7 +117,9 @@ def run_spk(self) -> None: data, indices = self._encode_data_and_indices(events) - data, indices = sub_sample(data, indices, self._shape_out[0], self._seed_sub_sampling) + # If we have more data than our shape allows, subsample + if data.shape[0] > self._shape_out[0]: + data, indices = sub_sample(data, indices, self._shape_out[0], self._seed_sub_sampling) self.out_port.send(data, indices) diff --git a/src/lava/proc/event_temp/flattening.py b/src/lava/proc/event_temp/flattening.py index d8adef62a..4894890a1 100644 --- a/src/lava/proc/event_temp/flattening.py +++ b/src/lava/proc/event_temp/flattening.py @@ -16,7 +16,6 @@ from lava.magma.core.model.py.model import PyLoihiProcessModel - class Flattening(AbstractProcess): def __init__(self, *, diff --git a/src/lava/utils/events.py b/src/lava/utils/events.py index 5018c8399..37af57afa 100644 --- a/src/lava/utils/events.py +++ b/src/lava/utils/events.py @@ -12,18 +12,15 @@ def sub_sample(data: np.ndarray, max_events: int, seed_random: ty.Optional[int] = 0) \ -> ty.Tuple[np.ndarray, np.ndarray]: - # If we have more data than our shape allows, subsample - if data.shape[0] > max_events: - random_rng = np.random.default_rng(seed_random) - data_idx_array = np.arange(0, data.shape[0]) - sampled_idx = random_rng.choice(data_idx_array, - max_events, - replace=False) - percentage_data_lost = (1 - max_events/data.shape[0])*100 - warnings.warn(f"Read {data.shape[0]} events. Maximum number of events is {max_events}. 
" - f"Removed {data.shape[0] - max_events}({percentage_data_lost}%) events by subsampling.") + random_rng = np.random.default_rng(seed_random) + data_idx_array = np.arange(0, data.shape[0]) + sampled_idx = random_rng.choice(data_idx_array, + max_events, + replace=False) - return data[sampled_idx], indices[sampled_idx] - else: - return data, indices \ No newline at end of file + percentage_data_lost = (1 - max_events/data.shape[0])*100 + warnings.warn(f"Read {data.shape[0]} events. Maximum number of events is {max_events}. " + f"Removed {data.shape[0] - max_events}({percentage_data_lost}%) events by subsampling.") + + return data[sampled_idx], indices[sampled_idx] From 3554e8936f7d754c20fd13f1ca6194fb4d4155a2 Mon Sep 17 00:00:00 2001 From: gkarray Date: Wed, 7 Dec 2022 17:51:01 +0100 Subject: [PATCH 14/32] refactoring event data processes (WIP) --- src/lava/proc/down_sampling/models.py | 47 -------------- src/lava/proc/down_sampling/process.py | 61 ------------------- .../event_data/binary_to_unary/process.py | 5 +- .../proc/event_data/events_to_frame/models.py | 38 ++++++++++++ .../{to_frame => events_to_frame}/process.py | 21 ++++--- .../proc/event_data/io/aedat_data_loader.py | 4 +- src/lava/proc/event_data/to_frame/models.py | 61 ------------------- src/lava/proc/max_pooling/process.py | 16 +---- 8 files changed, 57 insertions(+), 196 deletions(-) delete mode 100644 src/lava/proc/down_sampling/models.py delete mode 100644 src/lava/proc/down_sampling/process.py create mode 100644 src/lava/proc/event_data/events_to_frame/models.py rename src/lava/proc/event_data/{to_frame => events_to_frame}/process.py (67%) delete mode 100644 src/lava/proc/event_data/to_frame/models.py diff --git a/src/lava/proc/down_sampling/models.py b/src/lava/proc/down_sampling/models.py deleted file mode 100644 index 12031d571..000000000 --- a/src/lava/proc/down_sampling/models.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ - -import numpy as np -from numpy.lib.stride_tricks import as_strided - -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.resources import CPU -from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.py.model import PyLoihiProcessModel -from lava.proc.down_sampling.process import DownSampling -from lava.proc.conv import utils - - -@implements(proc=DownSampling, protocol=LoihiProtocol) -@requires(CPU) -class DownSamplingPM(PyLoihiProcessModel): - in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) - out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) - - stride: np.ndarray = LavaPyType(np.ndarray, np.int8, precision=8) - padding: np.ndarray = LavaPyType(np.ndarray, np.int8, precision=8) - - def run_spk(self) -> None: - data = self.in_port.recv() - - down_sampled_data = self._down_sample(data) - - self.out_port.send(down_sampled_data) - - def _down_sample(self, data: np.ndarray) -> np.ndarray: - output_shape = self.out_port.shape - - padded_data = np.pad(data, - (utils.make_tuple(self.padding[0]), - utils.make_tuple(self.padding[1])), - mode='constant') - - strides_w = (self.stride[0] * data.strides[0], - self.stride[1] * data.strides[1]) - - down_sampled_data = as_strided(padded_data, output_shape, strides_w) - - return down_sampled_data diff --git a/src/lava/proc/down_sampling/process.py 
b/src/lava/proc/down_sampling/process.py deleted file mode 100644 index 3b246c112..000000000 --- a/src/lava/proc/down_sampling/process.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ - -import typing as ty - -from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.process.variable import Var -from lava.proc.conv import utils - - -class DownSampling(AbstractProcess): - def __init__( - self, - shape_in: ty.Tuple[int], - stride: ty.Union[int, ty.Tuple[int, int]], - padding: ty.Optional[ty.Union[int, ty.Tuple[int, int]]] = (0, 0), - **kwargs) -> None: - super().__init__(shape_in=shape_in, - stride=stride, - padding=padding, - **kwargs) - - self._validate_shape_in(shape_in) - - in_channels = shape_in[-1] - out_channels = in_channels - - padding = utils.make_tuple(padding) - stride = utils.make_tuple(stride) - - shape_out = utils.output_shape( - shape_in, out_channels, (1, 1), stride, padding, (1, 1) - ) - - self.in_port = InPort(shape=shape_in) - self.out_port = OutPort(shape=shape_out) - self.padding = Var(shape=(2,), init=padding) - self.stride = Var(shape=(2,), init=stride) - - @staticmethod - def _validate_shape_in(shape_in): - if not (len(shape_in) == 2 or len(shape_in) == 3): - raise ValueError(f"shape_in should be 2 or 3 dimensional. " - f"{shape_in} given.") - - if not isinstance(shape_in[0], int) or not isinstance(shape_in[1], int): - raise ValueError(f"Width and height of shape_in should be integers." - f"{shape_in} given.") - if len(shape_in) == 3: - if shape_in[2] != 2: - raise ValueError(f"Third dimension of shape_in should be " - f"equal to 2. " - f"{shape_in} given.") - - if shape_in[0] <= 0 or shape_in[1] <= 0: - raise ValueError(f"Width and height of shape_in should be positive." - f"{shape_in} given.") - - return shape_in diff --git a/src/lava/proc/event_data/binary_to_unary/process.py b/src/lava/proc/event_data/binary_to_unary/process.py index 89e72eb6f..b669f116d 100644 --- a/src/lava/proc/event_data/binary_to_unary/process.py +++ b/src/lava/proc/event_data/binary_to_unary/process.py @@ -40,8 +40,9 @@ def _validate_shape(shape: ty.Tuple[int]) -> None: Shape to validate. """ if len(shape) != 1: - raise ValueError(f"Shape should be (n,). {shape} was given.") + raise ValueError(f"Shape should be of the form (max_num_events, )." + f"{shape} was given.") if shape[0] <= 0: - raise ValueError(f"Max number of events should be positive. " + raise ValueError(f"Max number of events should be positive." 
f"{shape} was given.") diff --git a/src/lava/proc/event_data/events_to_frame/models.py b/src/lava/proc/event_data/events_to_frame/models.py new file mode 100644 index 000000000..805976774 --- /dev/null +++ b/src/lava/proc/event_data/events_to_frame/models.py @@ -0,0 +1,38 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np + +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.proc.event_data.events_to_frame.process import EventsToFrame + + +@implements(proc=EventsToFrame, protocol=LoihiProtocol) +@requires(CPU) +class EventsToFramePM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + + def run_spk(self) -> None: + data, indices = self.in_port.recv() + + dense_data = self._transform(data, indices) + + self.out_port.send(dense_data) + + def _transform(self, data: np.ndarray, indices: np.ndarray) -> np.ndarray: + shape_out = self.out_port.shape + dense_data = np.zeros(shape_out) + + xs, ys = np.unravel_index(indices, shape_out[:-1]) + + dense_data[xs[data == 0], ys[data == 0], 0] = 1 + dense_data[xs[data == 1], ys[data == 1], shape_out[-1] - 1] = 1 + + return dense_data diff --git a/src/lava/proc/event_data/to_frame/process.py b/src/lava/proc/event_data/events_to_frame/process.py similarity index 67% rename from src/lava/proc/event_data/to_frame/process.py rename to src/lava/proc/event_data/events_to_frame/process.py index 22438020d..f28425195 100644 --- a/src/lava/proc/event_data/to_frame/process.py +++ b/src/lava/proc/event_data/events_to_frame/process.py @@ -8,11 +8,11 @@ from lava.magma.core.process.ports.ports import InPort, OutPort -class ToFrame(AbstractProcess): +class EventsToFrame(AbstractProcess): def __init__(self, *, shape_in: ty.Tuple[int], - shape_out: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]], + shape_out: ty.Tuple[int, int, int], **kwargs) -> None: super().__init__(shape_in=shape_in, shape_out=shape_out, @@ -24,6 +24,7 @@ def __init__(self, self.in_port = InPort(shape=shape_in) self.out_port = OutPort(shape=shape_out) + # TODO: Re-write error messages @staticmethod def _validate_shape_in(shape_in: ty.Tuple[int]) -> None: if len(shape_in) != 1: @@ -33,15 +34,15 @@ def _validate_shape_in(shape_in: ty.Tuple[int]) -> None: if shape_in[0] <= 0: raise ValueError(f"Width of shape_in should be positive. {shape_in} given.") + # TODO: Re-write error messages @staticmethod - def _validate_shape_out(shape_out: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]]) -> None: - if not (len(shape_out) == 2 or len(shape_out) == 3): - raise ValueError(f"shape_out should be 2 or 3 dimensional. {shape_out} given.") - - if len(shape_out) == 3: - if shape_out[2] != 2: - raise ValueError(f"Depth of the shape_out argument should be an integer and equal to 2. " - f"{shape_out} given.") + def _validate_shape_out(shape_out: ty.Tuple[int, int, int]) -> None: + if not len(shape_out) == 3: + raise ValueError(f"shape_out should be 3 dimensional. 
{shape_out} given.") + + if not (shape_out[2] == 1 or shape_out[2] == 2): + raise ValueError(f"Depth of the shape_out argument should be an integer and equal to 1 or 2. " + f"{shape_out} given.") if shape_out[0] <= 0 or shape_out[1] <= 0: raise ValueError(f"Width and height of the shape_out argument should be positive. " diff --git a/src/lava/proc/event_data/io/aedat_data_loader.py b/src/lava/proc/event_data/io/aedat_data_loader.py index 35b8eb0b8..dd1824c16 100644 --- a/src/lava/proc/event_data/io/aedat_data_loader.py +++ b/src/lava/proc/event_data/io/aedat_data_loader.py @@ -79,11 +79,11 @@ def _validate_shape_out(shape_out: ty.Tuple[int]) -> None: is not a negative number. Raises relevant exception if not """ if len(shape_out) != 1: - raise ValueError(f"Shape of the OutPort should be (n,). " + raise ValueError(f"Shape of the OutPort should be (n,)." f"{shape_out} was given.") if shape_out[0] <= 0: - raise ValueError(f"Max number of events should be positive. " + raise ValueError(f"Max number of events should be positive." f"{shape_out} was given.") diff --git a/src/lava/proc/event_data/to_frame/models.py b/src/lava/proc/event_data/to_frame/models.py deleted file mode 100644 index 6e8faac00..000000000 --- a/src/lava/proc/event_data/to_frame/models.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ - -import numpy as np - -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.resources import CPU -from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.py.model import PyLoihiProcessModel -from lava.proc.event_data.to_frame.process import ToFrame - - -@implements(proc=ToFrame, protocol=LoihiProtocol) -@requires(CPU) -class ToFramePM(PyLoihiProcessModel): - in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) - out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) - - def __init__(self, proc_params: dict) -> None: - super().__init__(proc_params) - self._shape_out = proc_params["shape_out"] - - def run_spk(self) -> None: - data, indices = self.in_port.recv() - - dense_data = self._transform(data, indices) - - self.out_port.send(dense_data) - - def _transform(self, data: np.ndarray, indices: np.ndarray) -> np.ndarray: - if len(self._shape_out) == 2: - return self._transform_2d(data, indices) - elif len(self._shape_out) == 3: - return self._transform_3d(data, indices) - - def _transform_2d(self, - data: np.ndarray, - indices: np.ndarray) -> np.ndarray: - dense_data = np.zeros(self._shape_out) - - xs, ys = np.unravel_index(indices, self._shape_out) - - dense_data[xs[data == 0], ys[data == 0]] = 1 - dense_data[xs[data == 1], ys[data == 1]] = 1 - - return dense_data - - def _transform_3d(self, - data: np.ndarray, - indices: np.ndarray) -> np.ndarray: - dense_data = np.zeros(self._shape_out) - - xs, ys = np.unravel_index(indices, self._shape_out[:-1]) - - dense_data[xs[data == 0], ys[data == 0], 0] = 1 - dense_data[xs[data == 1], ys[data == 1], 1] = 1 - - return dense_data diff --git a/src/lava/proc/max_pooling/process.py b/src/lava/proc/max_pooling/process.py index 30c8e8133..4c1876a08 100644 --- a/src/lava/proc/max_pooling/process.py +++ b/src/lava/proc/max_pooling/process.py @@ -48,19 +48,9 @@ def __init__( self.stride = Var(shape=(2,), init=stride) @staticmethod - def _validate_shape_in(shape_in): - if not
(len(shape_in) == 2 or len(shape_in) == 3): - raise ValueError(f"shape_in should be 2 or 3 dimensional. " - f"{shape_in} given.") - - if not isinstance(shape_in[0], int) or not isinstance(shape_in[1], int): - raise ValueError(f"Width and height of shape_in should be integers." - f"{shape_in} given.") - if len(shape_in) == 3: - if shape_in[2] != 2: - raise ValueError(f"Third dimension of shape_in should be " - f"equal to 2. " - f"{shape_in} given.") + def _validate_shape_in(shape_in: ty.Tuple[int, int, int]): + if not len(shape_in) == 3: + raise ValueError(f"shape_in should be 3 dimensional. {shape_in} given.") if shape_in[0] <= 0 or shape_in[1] <= 0: raise ValueError(f"Width and height of shape_in should be positive." From 0c541393da4ef90767c1af21f1b26f5e615bb21d Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Wed, 7 Dec 2022 20:27:09 +0100 Subject: [PATCH 15/32] added tests for data loader + b-to-u --- .../proc/event_data/io/aedat_data_loader.py | 3 +- src/lava/utils/events.py | 5 +- .../test_binary_to_unary_polarity.py | 61 +++--- .../event_data/io/test_aedat_data_loader.py | 183 +++++++++++------- 4 files changed, 156 insertions(+), 96 deletions(-) diff --git a/src/lava/proc/event_data/io/aedat_data_loader.py b/src/lava/proc/event_data/io/aedat_data_loader.py index cf2ff41a4..32be98ac4 100644 --- a/src/lava/proc/event_data/io/aedat_data_loader.py +++ b/src/lava/proc/event_data/io/aedat_data_loader.py @@ -106,6 +106,7 @@ def __init__(self, proc_params: dict) -> None: self._file["events"].size_y) self._seed_sub_sampling = proc_params["seed_sub_sampling"] + self._random_rng = np.random.default_rng(self._seed_sub_sampling) def run_spk(self) -> None: """ @@ -119,7 +120,7 @@ def run_spk(self) -> None: # If we have more data than our shape allows, subsample if data.shape[0] > self._shape_out[0]: - data, indices = sub_sample(data, indices, self._shape_out[0], self._seed_sub_sampling) + data, indices = sub_sample(data, indices, self._shape_out[0], self._random_rng) self.out_port.send(data, indices) diff --git a/src/lava/utils/events.py b/src/lava/utils/events.py index 37af57afa..cd70e08fe 100644 --- a/src/lava/utils/events.py +++ b/src/lava/utils/events.py @@ -10,10 +10,9 @@ def sub_sample(data: np.ndarray, indices: np.ndarray, max_events: int, - seed_random: ty.Optional[int] = 0) \ + random_rng: ty.Optional[np.random.Generator] = None) \ -> ty.Tuple[np.ndarray, np.ndarray]: - random_rng = np.random.default_rng(seed_random) data_idx_array = np.arange(0, data.shape[0]) sampled_idx = random_rng.choice(data_idx_array, max_events, @@ -21,6 +20,6 @@ def sub_sample(data: np.ndarray, percentage_data_lost = (1 - max_events/data.shape[0])*100 warnings.warn(f"Read {data.shape[0]} events. Maximum number of events is {max_events}. 
" - f"Removed {data.shape[0] - max_events}({percentage_data_lost}%) events by subsampling.") + f"Removed {data.shape[0] - max_events} ({percentage_data_lost:.1f}%) events by subsampling.") return data[sampled_idx], indices[sampled_idx] diff --git a/tests/lava/proc/event_data/binary_to_unary/test_binary_to_unary_polarity.py b/tests/lava/proc/event_data/binary_to_unary/test_binary_to_unary_polarity.py index 29c672f60..fc85904eb 100644 --- a/tests/lava/proc/event_data/binary_to_unary/test_binary_to_unary_polarity.py +++ b/tests/lava/proc/event_data/binary_to_unary/test_binary_to_unary_polarity.py @@ -17,11 +17,17 @@ from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg -from lava.proc.event_data.event_pre_processor.sparse_to_sparse.binary_to_unary_polarity import \ - BinaryToUnaryPolarity, BinaryToUnaryPolarityPM +from lava.proc.event_data.binary_to_unary.models import BinaryToUnaryPolarity, BinaryToUnaryPolarityPM + -# TODO: add doc strings for these processes class RecvSparse(AbstractProcess): + """ + Process that receives arbitrary sparse data. + + Parameters + ---------- + shape: tuple, shape of the process + """ def __init__(self, shape: ty.Tuple[int]) -> None: super().__init__(shape=shape) @@ -41,6 +47,9 @@ class PyRecvSparsePM(PyLoihiProcessModel): idx: np.ndarray = LavaPyType(np.ndarray, int) def run_spk(self) -> None: + """ + Receives the data and pads with zeros to enable access with get() + """ data, idx = self.in_port.recv() self.data = np.pad(data, @@ -52,6 +61,13 @@ def run_spk(self) -> None: class SendSparse(AbstractProcess): + """ + Process that sends arbitrary sparse data. + + Parameters + ---------- + shape: tuple, shape of the process + """ def __init__(self, shape: ty.Tuple[int], data: np.ndarray, @@ -109,7 +125,6 @@ def test_init(self): self.assertIsInstance(pm, BinaryToUnaryPolarityPM) def test_binary_to_unary_polarity_encoding(self): - # TODO: add explanations for the meaning of binary and unary somewhere? explain test variables? """Tests whether the encoding from binary to unary works correctly.""" data = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0]) indices = np.array([1, 5, 4, 3, 3, 2, 0, 1, 0]) @@ -147,25 +162,25 @@ def test_binary_to_unary_polarity_encoding(self): expected_indices) # TODO: I guess not needed? should any edge cases be treated? 
- def test_run(self): - data = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0]) - indices = np.array([1, 5, 4, 3, 3, 2, 0, 1, 0]) - - send_sparse = SendSparse(shape=(10,), data=data, indices=indices) - binary_to_unary_encoder = BinaryToUnaryPolarity(shape=(10,)) - - send_sparse.out_port.connect(binary_to_unary_encoder.in_port) - - # Run parameters - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=1) - - # Running - binary_to_unary_encoder.run(condition=run_cnd, run_cfg=run_cfg) - - binary_to_unary_encoder.stop() - - self.assertFalse(binary_to_unary_encoder.runtime._is_running) + # def test_run(self): + # data = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0]) + # indices = np.array([1, 5, 4, 3, 3, 2, 0, 1, 0]) + # + # send_sparse = SendSparse(shape=(10,), data=data, indices=indices) + # binary_to_unary_encoder = BinaryToUnaryPolarity(shape=(10,)) + # + # send_sparse.out_port.connect(binary_to_unary_encoder.in_port) + # + # # Run parameters + # run_cfg = Loihi1SimCfg() + # run_cnd = RunSteps(num_steps=1) + # + # # Running + # binary_to_unary_encoder.run(condition=run_cnd, run_cfg=run_cfg) + # + # binary_to_unary_encoder.stop() + # + # self.assertFalse(binary_to_unary_encoder.runtime._is_running) if __name__ == '__main__': diff --git a/tests/lava/proc/event_data/io/test_aedat_data_loader.py b/tests/lava/proc/event_data/io/test_aedat_data_loader.py index 8d39929b2..4f3307ac0 100644 --- a/tests/lava/proc/event_data/io/test_aedat_data_loader.py +++ b/tests/lava/proc/event_data/io/test_aedat_data_loader.py @@ -9,11 +9,11 @@ import unittest from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.ports.ports import InPort from lava.magma.core.process.variable import Var from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.ports import PyInPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel @@ -49,8 +49,7 @@ class PyRecvSparsePM(PyLoihiProcessModel): def run_spk(self) -> None: """ - Receives the data and pads with zeros to be able to access it with - Lava Vars. + Receives the data and pads with zeros to enable access with get(). """ data, idx = self.in_port.recv() @@ -131,7 +130,6 @@ def test_run_without_sub_sampling(self): """ Tests whether running yields the expectde behavior, given that the user parameters are all correct. - TODO: implement this test, show functionality without sub-sampling """ data_history = [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], @@ -149,9 +147,8 @@ def test_run_without_sub_sampling(self): [1910, 1382, 1909, 1562, 1606, 1381], [464] ] - seed_rng = 0 - rng = np.random.default_rng(seed=seed_rng) + seed_rng = 0 max_num_events = 15 data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", shape_out=(max_num_events,), @@ -191,34 +188,23 @@ def test_sub_sampling(self): that is smaller than the amount of events we receive in a given batch (i.e. the process will sub-sample correctly). 
""" - data_history = [ - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + expected_data = [ + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [0] ] - indices_history = [ - [1597, 2308, 2486, 2496, 2498, 1787, 2642, 2633, 2489, - 2488, 1596, 1729, 1727, 2500, 1780], - [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], - [7138, 2301, 2471, 1601, 2982, 1364, 1379, 1386, 1384, - 2983, 1390, 2289, 1401, 1362, 2293], - [1910, 1382, 1909, 1562, 1606, 1381], - [464] - ] - seed_rng = 0 - rng = np.random.default_rng(seed=seed_rng) - - expected_data = [ - [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - ] expected_indices = [ [1787, 1780, 2498, 2633, 2486, 1597, 1727, 2496, 2500, 1729], - [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911,] + [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], + [1362, 1384, 7138, 1379, 1390, 2982, 1364, 2301, 2289, 1386], + [1910, 1382, 1909, 1562, 1606, 1381], + [464] ] + seed_rng = 0 max_num_events = 10 data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", shape_out=(max_num_events,), @@ -228,7 +214,7 @@ def test_sub_sampling(self): data_loader.out_port.connect(recv_sparse.in_port) # Run parameters - num_steps = 2 + num_steps = 5 run_cfg = Loihi1SimCfg() run_cnd = RunSteps(num_steps=1) @@ -236,30 +222,15 @@ def test_sub_sampling(self): for i in range(num_steps): data_loader.run(condition=run_cnd, run_cfg=run_cfg) - expected_data = np.array(data_history[i]) - expected_indices = np.array(indices_history[i]) - sent_and_received_data = \ - recv_sparse.data.get()[:expected_data.shape[0]] + recv_sparse.data.get()[:len(expected_data[i])] sent_and_received_indices = \ - recv_sparse.idx.get()[:expected_indices.shape[0]] - - if expected_data.shape[0] > max_num_events: - data_idx_array = np.arange(0, expected_data.shape[0]) - sampled_idx = rng.choice(data_idx_array, - max_num_events, - replace=False) - - expected_data = expected_data[sampled_idx] - expected_indices = expected_indices[sampled_idx] + recv_sparse.idx.get()[:len(expected_indices[i])] np.testing.assert_equal(sent_and_received_data, - expected_data) + expected_data[i]) np.testing.assert_equal(sent_and_received_indices, - expected_indices) - - print(f"data timestep {i}: ", sent_and_received_data) - print(f"indices timestep {i}: ", sent_and_received_indices) + expected_indices[i]) # Stopping data_loader.stop() @@ -267,37 +238,120 @@ def test_sub_sampling(self): def test_sub_sampling_seed(self): """ Tests whether using different seeds does indeed result in different samples. + TODO: would testing on only 1 timestep be sufficient? 
""" - # TODO: implement this - pass + expected_indices_seed_0 = [ + [1787, 1780, 2498, 2633, 2486, 1597, 1727, 2496, 2500, 1729], + [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], + [1362, 1384, 7138, 1379, 1390, 2982, 1364, 2301, 2289, 1386], + [1910, 1382, 1909, 1562, 1606, 1381], + [464] + ] + + expected_indices_seed_1 = [ + [2498, 2486, 2488, 1597, 1727, 2496, 2308, 2642, 2489, 2500], + [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], + [1401, 1390, 1386, 1364, 2289, 7138, 1601, 2301, 1379, 1384], + [1910, 1382, 1909, 1562, 1606, 1381], + [464] + ] + sent_and_received_indices_1 = [] + sent_and_received_indices_2 = [] + + max_num_events = 10 + seed_rng_run_1 = 0 + seed_rng_run_2 = 1 + + data_loader_1 = AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng_run_1) + data_loader_2 = AedatDataLoader(file_path="../dvs_recording.aedat4", + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng_run_2) + + recv_sparse_1 = RecvSparse(shape=(max_num_events,)) + recv_sparse_2 = RecvSparse(shape=(max_num_events,)) + + data_loader_1.out_port.connect(recv_sparse_1.in_port) + data_loader_2.out_port.connect(recv_sparse_2.in_port) + + num_steps = 5 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + for i in range(num_steps): + data_loader_1.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_indices_1.append\ + (recv_sparse_1.idx.get()[:len(expected_indices_seed_1[i])]) + + np.testing.assert_equal(sent_and_received_indices_1, + expected_indices_seed_0) + + data_loader_1.stop() + + for i in range(num_steps): + data_loader_2.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_indices_2.append\ + (recv_sparse_2.idx.get()[:len(expected_indices_seed_1[i])]) + + np.testing.assert_equal(sent_and_received_indices_2, + expected_indices_seed_1) + + data_loader_2.stop() def test_end_of_file(self): """ Tests whether we loop back to the beginning of the event stream when we reach - the end of the aedat4 file. - TODO: implement this. dvs_recording.aedat4 should be 30 timesteps long. + the end of the aedat4 file. The test file contains 27 time-steps. 
""" + data_time_steps_1_to_5 = [] + data_time_steps_28_to_32 = [] + indices_time_steps_1_to_5 = [] + indices_time_steps_28_to_32 = [] + + seed_rng = 0 + max_num_events = 15 data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(3000,), - seed_sub_sampling=0) + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng) + recv_sparse = RecvSparse(shape=(max_num_events,)) + + data_loader.out_port.connect(recv_sparse.in_port) # Run parameters - num_steps = 10 + num_steps = 32 run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=num_steps) + run_cnd = RunSteps(num_steps=1) # Running - data_loader.run(condition=run_cnd, run_cfg=run_cfg) + for i in range(num_steps): + data_loader.run(condition=run_cnd, run_cfg=run_cfg) + # get data from the first 5 timesteps + if i in range(5): + data_time_steps_1_to_5.append\ + (recv_sparse.data.get()) + indices_time_steps_1_to_5.append\ + (recv_sparse.idx.get()) + + # get data from timesteps 28-32 + if i in range(27,32): + data_time_steps_28_to_32.append\ + (recv_sparse.data.get()) + indices_time_steps_28_to_32.append\ + (recv_sparse.idx.get()) + + np.testing.assert_equal(data_time_steps_1_to_5, data_time_steps_28_to_32) + np.testing.assert_equal(indices_time_steps_1_to_5, indices_time_steps_28_to_32) # Stopping data_loader.stop() - self.assertFalse(data_loader.runtime._is_running) - def test_index_encoding(self): """ - Tests whether indices are correctly calculated given x and y coordinates. - TODO: have less timesteps? maybe 2-3 (show it works for multiple timesteps with multiple sizes)? + Tests whether indices are correctly calculated during the process. + TODO: have less timesteps? maybe 2? (show it works for multiple timesteps with multiple sizes)? no difference in runtime """ x_history = [ [8, 12, 13, 13, 13, 9, 14, 14, 13, 13, 8, 9, 9, 13, 9], @@ -327,7 +381,7 @@ def test_index_encoding(self): rng = np.random.default_rng(seed=seed_rng) dense_shape = (240, 180) - max_num_events = 10 + max_num_events = 15 data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", shape_out=(max_num_events,), seed_sub_sampling=seed_rng) @@ -353,15 +407,6 @@ def test_index_encoding(self): reconstructed_xs, reconstructed_ys = \ np.unravel_index(sent_and_received_indices, dense_shape) - if expected_xs.shape[0] > max_num_events: - data_idx_array = np.arange(0, expected_xs.shape[0]) - sampled_idx = rng.choice(data_idx_array, - max_num_events, - replace=False) - - expected_xs = expected_xs[sampled_idx] - expected_ys = expected_ys[sampled_idx] - np.testing.assert_equal(reconstructed_xs, expected_xs) np.testing.assert_equal(reconstructed_ys, expected_ys) From 1bbcc66d1ed94331d25b6879b1dfd8fab527fc4b Mon Sep 17 00:00:00 2001 From: gkarray Date: Wed, 7 Dec 2022 20:48:37 +0100 Subject: [PATCH 16/32] adding complete unit test suite for MaxPooling --- src/lava/proc/max_pooling/models.py | 36 +- src/lava/proc/max_pooling/process.py | 26 +- .../down_sampling/test_down_sampling_dense.py | 325 ------------------ .../__init__.py | 0 tests/lava/proc/max_pooling/test_models.py | 300 ++++++++++++++++ tests/lava/proc/max_pooling/test_process.py | 63 ++++ 6 files changed, 412 insertions(+), 338 deletions(-) delete mode 100644 tests/lava/proc/down_sampling/test_down_sampling_dense.py rename tests/lava/proc/{down_sampling => max_pooling}/__init__.py (100%) create mode 100644 tests/lava/proc/max_pooling/test_models.py create mode 100644 tests/lava/proc/max_pooling/test_process.py diff --git a/src/lava/proc/max_pooling/models.py 
b/src/lava/proc/max_pooling/models.py index 5f0cba467..55832200a 100644 --- a/src/lava/proc/max_pooling/models.py +++ b/src/lava/proc/max_pooling/models.py @@ -4,6 +4,7 @@ import numpy as np from numpy.lib.stride_tricks import as_strided +import typing as ty from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort @@ -33,21 +34,36 @@ def run_spk(self) -> None: self.out_port.send(max_pooled_data) def _max_pooling(self, data: np.ndarray) -> np.ndarray: - output_shape = self.out_port.shape + result = np.zeros(self.out_port.shape) + for channel in range(self.out_port.shape[-1]): + result[:, :, channel] = \ + self._max_pooling_2d(data[:, :, channel], + output_shape=self.out_port.shape[:-1], + kernel_size=self.kernel_size, + stride=self.stride, + padding=self.padding) + + return result + + @staticmethod + def _max_pooling_2d(data: np.ndarray, + output_shape: ty.Tuple[int, int], + kernel_size: np.ndarray, + stride: np.ndarray, + padding: np.ndarray) -> np.ndarray: padded_data = np.pad(data, - (utils.make_tuple(self.padding[0]), - utils.make_tuple(self.padding[1])), - mode='constant') + (padding[0], padding[1]), + mode='constant').copy() shape_w = (output_shape[0], output_shape[1], - self.kernel_size[0], - self.kernel_size[1]) - strides_w = (self.stride[0] * data.strides[0], - self.stride[1] * data.strides[1], - data.strides[0], - data.strides[1]) + kernel_size[0], + kernel_size[1]) + strides_w = (stride[0] * padded_data.strides[0], + stride[1] * padded_data.strides[1], + padded_data.strides[0], + padded_data.strides[1]) pooled_data = as_strided(padded_data, shape_w, strides_w) max_pooled_data = pooled_data.max(axis=(2, 3)) diff --git a/src/lava/proc/max_pooling/process.py b/src/lava/proc/max_pooling/process.py index 4c1876a08..30522f274 100644 --- a/src/lava/proc/max_pooling/process.py +++ b/src/lava/proc/max_pooling/process.py @@ -34,8 +34,12 @@ def __init__( out_channels = in_channels kernel_size = utils.make_tuple(kernel_size) - padding = utils.make_tuple(padding) stride = utils.make_tuple(stride) + padding = utils.make_tuple(padding) + + self._validate_kernel_size(kernel_size) + self._validate_stride(stride) + self._validate_padding(padding) shape_out = utils.output_shape( shape_in, out_channels, kernel_size, stride, padding, (1, 1) @@ -48,7 +52,7 @@ def __init__( self.stride = Var(shape=(2,), init=stride) @staticmethod - def _validate_shape_in(shape_in: ty.Tuple[int, int, int]): + def _validate_shape_in(shape_in: ty.Tuple[int, int, int]) -> None: if not len(shape_in) == 3: raise ValueError(f"shape_in should be 3 dimensional. {shape_in} given.") @@ -56,4 +60,20 @@ def _validate_shape_in(shape_in: ty.Tuple[int, int, int]): raise ValueError(f"Width and height of shape_in should be positive." f"{shape_in} given.") - return shape_in + @staticmethod + def _validate_kernel_size(kernel_size: ty.Tuple[int, int]) -> None: + if kernel_size[0] <= 0 or kernel_size[1] <= 0: + raise ValueError(f"Kernel size elements should be strictly positive." + f"{kernel_size=} found.") + + @staticmethod + def _validate_stride(stride: ty.Tuple[int, int]) -> None: + if stride[0] <= 0 or stride[1] <= 0: + raise ValueError(f"Stride elements should be strictly positive." + f"{stride=} found.") + + @staticmethod + def _validate_padding(padding: ty.Tuple[int, int]) -> None: + if padding[0] < 0 or padding[1] < 0: + raise ValueError(f"Padding elements should be positive." 
+ f"{padding=} found.") \ No newline at end of file diff --git a/tests/lava/proc/down_sampling/test_down_sampling_dense.py b/tests/lava/proc/down_sampling/test_down_sampling_dense.py deleted file mode 100644 index 861c7a5d8..000000000 --- a/tests/lava/proc/down_sampling/test_down_sampling_dense.py +++ /dev/null @@ -1,325 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ - -import numpy as np -import unittest - -from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.process.variable import Var -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.resources import CPU -from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.py.model import PyLoihiProcessModel -from lava.magma.core.run_conditions import RunSteps -from lava.magma.core.run_configs import Loihi1SimCfg -from lava.proc.down_sampling.models import DownSampling, DownSamplingPM - - -class RecvDense(AbstractProcess): - def __init__(self, - shape: tuple) -> None: - super().__init__(shape=shape) - - self.in_port = InPort(shape=shape) - - self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) - - -@implements(proc=RecvDense, protocol=LoihiProtocol) -@requires(CPU) -class PyRecvDensePM(PyLoihiProcessModel): - in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) - - data: np.ndarray = LavaPyType(np.ndarray, int) - - def run_spk(self) -> None: - data = self.in_port.recv() - - self.data = data - - -class SendDense(AbstractProcess): - def __init__(self, - shape: tuple, - data: np.ndarray) -> None: - super().__init__(shape=shape, data=data) - - self.out_port = OutPort(shape=shape) - - -@implements(proc=SendDense, protocol=LoihiProtocol) -@requires(CPU) -class PySendDensePM(PyLoihiProcessModel): - out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) - - def __init__(self, proc_params): - super().__init__(proc_params) - self._data = proc_params["data"] - - def run_spk(self) -> None: - data = self._data - - self.out_port.send(data) - - -class TestProcessDownSamplingDense(unittest.TestCase): - def test_init(self): - """Tests instantiation of DownSamplingDense.""" - down_sampler = DownSamplingDense(shape_in=(240, 180), - down_sampling_method=DownSamplingMethodDense.CONVOLUTION, - down_sampling_factor=8) - - self.assertIsInstance(down_sampler, DownSamplingDense) - self.assertEqual(down_sampler.proc_params["shape_in"], (240, 180)) - self.assertEqual(down_sampler.proc_params["down_sampling_method"], DownSamplingMethodDense.CONVOLUTION) - self.assertEqual(down_sampler.proc_params["down_sampling_factor"], 8) - - def test_invalid_shape_in_negative_width_or_height(self): - """Checks if an error is raised when a negative width or height - for shape_in is given.""" - with(self.assertRaises(ValueError)): - _ = DownSamplingDense(shape_in=(-240, 180), - down_sampling_method=DownSamplingMethodDense.CONVOLUTION, - down_sampling_factor=8) - - with(self.assertRaises(ValueError)): - _ = DownSamplingDense(shape_in=(240, -180), - down_sampling_method=DownSamplingMethodDense.CONVOLUTION, - down_sampling_factor=8) - - def test_invalid_shape_in_decimal_width_or_height(self): - """Checks if an error is raised when a decimal width or height - for shape_in is given.""" - with(self.assertRaises(ValueError)): 
- _ = DownSamplingDense(shape_in=(240.5, 180), - down_sampling_method=DownSamplingMethodDense.CONVOLUTION, - down_sampling_factor=8) - - with(self.assertRaises(ValueError)): - _ = DownSamplingDense(shape_in=(240, 180.5), - down_sampling_method=DownSamplingMethodDense.CONVOLUTION, - down_sampling_factor=8) - - def test_invalid_shape_in_dimension(self): - """Checks if an error is raised when a 1d or 4d input shape is given.""" - with(self.assertRaises(ValueError)): - _ = DownSamplingDense(shape_in=(240,), - down_sampling_method=DownSamplingMethodDense.CONVOLUTION, - down_sampling_factor=8) - - with(self.assertRaises(ValueError)): - _ = DownSamplingDense(shape_in=(240, 180, 2, 1), - down_sampling_method=DownSamplingMethodDense.CONVOLUTION, - down_sampling_factor=8) - - def test_invalid_shape_in_third_dimension_not_2(self): - """Checks if an error is raised if the value of the 3rd dimension - for the shape_in parameter is not 2.""" - with(self.assertRaises(ValueError)): - _ = DownSamplingDense(shape_in=(240, 180, 1), - down_sampling_method=DownSamplingMethodDense.CONVOLUTION, - down_sampling_factor=8) - - def test_invalid_down_sampling_factor_negative(self): - """Checks if an error is raised if the given down sampling factor - is negative.""" - with(self.assertRaises(ValueError)): - _ = DownSamplingDense(shape_in=(240, 180), - down_sampling_method=DownSamplingMethodDense.CONVOLUTION, - down_sampling_factor=-8) - - def test_invalid_down_sampling_factor_decimal(self): - """Checks if an error is raised if the given down sampling factor is decimal.""" - with(self.assertRaises(ValueError)): - _ = DownSamplingDense(shape_in=(240, 180), - down_sampling_method=DownSamplingMethodDense.CONVOLUTION, - down_sampling_factor=8.5) - - def test_invalid_down_sampling_method(self): - """Checks if an error is raised if the given down sampling method is not of type - DownSamplingMethodDense.""" - with(self.assertRaises(TypeError)): - _ = DownSamplingDense(shape_in=(240, 180), - down_sampling_method="convolution", - down_sampling_factor=8) - - -# TODO (GK): Add tests for widths and heights not divisible by -# TODO (GK): down_sampling_factor -class TestProcessModelDownSamplingDense(unittest.TestCase): - def test_init(self): - proc_params = { - "shape_in": (240, 180), - "down_sampling_method": DownSamplingMethodDense.SKIPPING, - "down_sampling_factor": 8 - } - - pm = DownSamplingDensePM(proc_params) - - self.assertIsInstance(pm, DownSamplingDensePM) - self.assertEqual(pm._shape_in, proc_params["shape_in"]) - self.assertEqual(pm._down_sampling_method, - proc_params["down_sampling_method"]) - self.assertEqual(pm._down_sampling_factor, - proc_params["down_sampling_factor"]) - - def test_run(self): - data = np.zeros((8, 8)) - - send_dense = SendDense(shape=(8, 8), data=data) - down_sampler = DownSamplingDense(shape_in=(8, 8), - down_sampling_method=DownSamplingMethodDense.SKIPPING, - down_sampling_factor=2) - - send_dense.out_port.connect(down_sampler.in_port) - - # Run parameters - num_steps = 1 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=num_steps) - - # Running - down_sampler.run(condition=run_cnd, run_cfg=run_cfg) - - # Stopping - down_sampler.stop() - - self.assertFalse(down_sampler.runtime._is_running) - - def test_down_sampling_skipping(self): - data = np.zeros((8, 8)) - data[0, 0] = 1 - data[1, 2] = 1 - data[2, 1] = 1 - - data[1, 5] = 1 - data[2, 7] = 1 - - data[4, 4] = 1 - - expected_data = np.zeros((2, 2)) - expected_data[0, 0] = 1 - expected_data[1, 1] = 1 - - send_dense = SendDense(shape=(8, 
8), data=data) - down_sampler = DownSamplingDense(shape_in=(8, 8), - down_sampling_method=DownSamplingMethodDense.SKIPPING, - down_sampling_factor=4) - recv_dense = RecvDense(shape=(2, 2)) - - send_dense.out_port.connect(down_sampler.in_port) - down_sampler.out_port.connect(recv_dense.in_port) - - # Run parameters - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=1) - - # Running - send_dense.run(condition=run_cnd, run_cfg=run_cfg) - - sent_and_received_data = \ - recv_dense.data.get() - - send_dense.stop() - - np.testing.assert_equal(sent_and_received_data, - expected_data) - - def test_down_sampling_max_pooling(self): - data = np.zeros((8, 8)) - data[0, 0] = 1 - data[1, 2] = 1 - data[2, 1] = 1 - - data[1, 5] = 1 - data[2, 7] = 1 - - data[4, 4] = 1 - - expected_data = np.zeros((2, 2)) - expected_data[0, 0] = 1 - expected_data[0, 1] = 1 - expected_data[1, 1] = 1 - - send_dense = SendDense(shape=(8, 8), data=data) - down_sampler = DownSamplingDense(shape_in=(8, 8), - down_sampling_method=DownSamplingMethodDense.MAX_POOLING, - down_sampling_factor=4) - recv_dense = RecvDense(shape=(2, 2)) - - send_dense.out_port.connect(down_sampler.in_port) - down_sampler.out_port.connect(recv_dense.in_port) - - # Run parameters - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=1) - - # Running - send_dense.run(condition=run_cnd, run_cfg=run_cfg) - - sent_and_received_data = \ - recv_dense.data.get() - - send_dense.stop() - - np.testing.assert_equal(sent_and_received_data, - expected_data) - - def test_down_sampling_convolution(self): - data = np.zeros((8, 8)) - data[0, 0] = 1 - data[1, 2] = 1 - data[2, 1] = 1 - - data[1, 5] = 1 - data[2, 7] = 1 - - data[4, 4] = 1 - - expected_data = np.zeros((2, 2)) - expected_data[0, 0] = 3 - expected_data[0, 1] = 2 - expected_data[1, 1] = 1 - - send_dense = SendDense(shape=(8, 8), data=data) - down_sampler = DownSamplingDense(shape_in=(8, 8), - down_sampling_method=DownSamplingMethodDense.CONVOLUTION, - down_sampling_factor=4) - recv_dense = RecvDense(shape=(2, 2)) - - send_dense.out_port.connect(down_sampler.in_port) - down_sampler.out_port.connect(recv_dense.in_port) - - # Run parameters - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=1) - - # Running - send_dense.run(condition=run_cnd, run_cfg=run_cfg) - - sent_and_received_data = \ - recv_dense.data.get() - - send_dense.stop() - - # TODO : REMOVE THIS AFTER DEBUG - fig, (ax1, ax2, ax3) = plt.subplots(1, 3) - fig.suptitle('Max pooling') - ax1.imshow(data) - ax1.set_title("Data") - ax2.imshow(expected_data) - ax2.set_title("Expected data") - ax3.imshow(sent_and_received_data) - ax3.set_title("Actual data") - fig.show() - - np.testing.assert_equal(sent_and_received_data, - expected_data) - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/lava/proc/down_sampling/__init__.py b/tests/lava/proc/max_pooling/__init__.py similarity index 100% rename from tests/lava/proc/down_sampling/__init__.py rename to tests/lava/proc/max_pooling/__init__.py diff --git a/tests/lava/proc/max_pooling/test_models.py b/tests/lava/proc/max_pooling/test_models.py new file mode 100644 index 000000000..253b85d3e --- /dev/null +++ b/tests/lava/proc/max_pooling/test_models.py @@ -0,0 +1,300 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np +import unittest + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from 
lava.magma.core.process.variable import Var +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.run_configs import Loihi1SimCfg +from lava.proc.max_pooling.process import MaxPooling +from lava.proc.max_pooling.models import MaxPoolingPM + + +class RecvDense(AbstractProcess): + def __init__(self, + shape: tuple) -> None: + super().__init__(shape=shape) + + self.in_port = InPort(shape=shape) + + self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) + + +@implements(proc=RecvDense, protocol=LoihiProtocol) +@requires(CPU) +class PyRecvDensePM(PyLoihiProcessModel): + in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + + data: np.ndarray = LavaPyType(np.ndarray, int) + + def run_spk(self) -> None: + data = self.in_port.recv() + + self.data = data + + +class SendDense(AbstractProcess): + def __init__(self, + shape: tuple, + data: np.ndarray) -> None: + super().__init__(shape=shape, data=data) + + self.out_port = OutPort(shape=shape) + + +@implements(proc=SendDense, protocol=LoihiProtocol) +@requires(CPU) +class PySendDensePM(PyLoihiProcessModel): + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + + def __init__(self, proc_params): + super().__init__(proc_params) + self._data = proc_params["data"] + + def run_spk(self) -> None: + data = self._data + + self.out_port.send(data) + + +class TestProcessModelMaxPooling(unittest.TestCase): + def test_init(self): + pm = MaxPoolingPM() + + self.assertIsInstance(pm, MaxPoolingPM) + + def test_max_pooling(self): + data = np.zeros((8, 8, 1)) + data[0, 0, 0] = 1 + data[1, 2, 0] = 1 + data[2, 1, 0] = 1 + + data[1, 5, 0] = 1 + data[2, 7, 0] = 1 + + data[4, 7, 0] = 1 + + expected_data = np.zeros((2, 2, 1)) + expected_data[0, 0, 0] = 1 + expected_data[0, 1, 0] = 1 + expected_data[1, 1, 0] = 1 + + send_dense = SendDense(shape=(8, 8, 1), data=data) + down_sampler = MaxPooling(shape_in=(8, 8, 1), + kernel_size=4) + recv_dense = RecvDense(shape=(2, 2, 1)) + + send_dense.out_port.connect(down_sampler.in_port) + down_sampler.out_port.connect(recv_dense.in_port) + + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + send_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + send_dense.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + def test_max_pooling_2_channels(self): + data = np.zeros((8, 8, 2)) + data[0, 0, 0] = 1 + data[1, 2, 0] = 1 + data[2, 1, 0] = 1 + + data[1, 5, 1] = 1 + data[2, 7, 1] = 1 + + data[4, 7, 0] = 1 + + expected_data = np.zeros((2, 2, 2)) + expected_data[0, 0, 0] = 1 + expected_data[0, 1, 1] = 1 + expected_data[1, 1, 0] = 1 + + send_dense = SendDense(shape=(8, 8, 2), data=data) + down_sampler = MaxPooling(shape_in=(8, 8, 2), + kernel_size=4) + recv_dense = RecvDense(shape=(2, 2, 2)) + + send_dense.out_port.connect(down_sampler.in_port) + down_sampler.out_port.connect(recv_dense.in_port) + + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + send_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + send_dense.stop() + + np.testing.assert_almost_equal(sent_and_received_data, + expected_data) + + 
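+    # A note on the test below (descriptive comment, assuming the default
+    # MaxPooling behavior shown elsewhere in this patch): with a 9x9 input
+    # and a 4x4 kernel, stride defaults to the kernel size, so the pooled
+    # output is 2x2 and events at row or column index 8 fall outside the
+    # pooled region and are dropped.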
def test_max_pooling_shape_non_divisible_by_kernel_size(self): + data = np.zeros((9, 9, 1)) + data[0, 0, 0] = 1 + data[1, 2, 0] = 1 + data[2, 1, 0] = 1 + + data[1, 5, 0] = 1 + data[2, 7, 0] = 1 + + data[4, 8, 0] = 1 + + expected_data = np.zeros((2, 2, 1)) + expected_data[0, 0, 0] = 1 + expected_data[0, 1, 0] = 1 + + send_dense = SendDense(shape=(9, 9, 1), data=data) + down_sampler = MaxPooling(shape_in=(9, 9, 1), + kernel_size=4) + recv_dense = RecvDense(shape=(2, 2, 1)) + + send_dense.out_port.connect(down_sampler.in_port) + down_sampler.out_port.connect(recv_dense.in_port) + + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + send_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + send_dense.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + def test_max_pooling_with_padding(self): + data = np.zeros((4, 4, 1)) + data[0, 0, 0] = 1 + data[1, 2, 0] = 1 + data[2, 1, 0] = 1 + + expected_data = np.zeros((2, 2, 1)) + expected_data[0, 0, 0] = 1 + expected_data[0, 1, 0] = 1 + expected_data[1, 0, 0] = 1 + + send_dense = SendDense(shape=(4, 4, 1), data=data) + down_sampler = MaxPooling(shape_in=(4, 4, 1), + kernel_size=4, + padding=(2, 2)) + recv_dense = RecvDense(shape=(2, 2, 1)) + + send_dense.out_port.connect(down_sampler.in_port) + down_sampler.out_port.connect(recv_dense.in_port) + + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + send_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + send_dense.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + def test_max_pooling_non_square_kernel(self): + data = np.zeros((8, 8, 1)) + data[0, 0, 0] = 1 + data[1, 2, 0] = 1 + data[2, 1, 0] = 1 + + data[1, 5, 0] = 1 + data[2, 7, 0] = 1 + + data[4, 7, 0] = 1 + + expected_data = np.zeros((4, 2, 1)) + expected_data[0, 0, 0] = 1 + expected_data[0, 1, 0] = 1 + expected_data[1, 0, 0] = 1 + expected_data[1, 1, 0] = 1 + expected_data[2, 1, 0] = 1 + + send_dense = SendDense(shape=(8, 8, 1), data=data) + down_sampler = MaxPooling(shape_in=(8, 8, 1), + kernel_size=(2, 4)) + recv_dense = RecvDense(shape=(4, 2, 1)) + + send_dense.out_port.connect(down_sampler.in_port) + down_sampler.out_port.connect(recv_dense.in_port) + + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + send_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + send_dense.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + def test_max_pooling_stride_different_than_kernel_size(self): + data = np.zeros((8, 8, 1)) + data[0, 0, 0] = 1 + data[1, 2, 0] = 1 + data[2, 1, 0] = 1 + + data[1, 5, 0] = 1 + data[2, 7, 0] = 1 + + data[4, 7, 0] = 1 + + expected_data = np.zeros((3, 3, 1)) + expected_data[0, 0, 0] = 1 + expected_data[0, 1, 0] = 1 + expected_data[0, 2, 0] = 1 + expected_data[1, 0, 0] = 1 + expected_data[1, 2, 0] = 1 + expected_data[2, 2, 0] = 1 + + send_dense = SendDense(shape=(8, 8, 1), data=data) + down_sampler = MaxPooling(shape_in=(8, 8, 1), + kernel_size=4, + stride=2) + recv_dense = RecvDense(shape=(3, 3, 1)) + + send_dense.out_port.connect(down_sampler.in_port) + down_sampler.out_port.connect(recv_dense.in_port) + + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + send_dense.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + send_dense.stop() + + np.testing.assert_equal(sent_and_received_data, + 
expected_data) + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/proc/max_pooling/test_process.py b/tests/lava/proc/max_pooling/test_process.py new file mode 100644 index 000000000..07bf34eff --- /dev/null +++ b/tests/lava/proc/max_pooling/test_process.py @@ -0,0 +1,63 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest + +from lava.proc.max_pooling.process import MaxPooling + + +class TestProcessMaxPooling(unittest.TestCase): + def test_init(self): + """Tests instantiation of DownSamplingDense.""" + max_pooling = MaxPooling(shape_in=(240, 180, 1), + kernel_size=2) + + self.assertIsInstance(max_pooling, MaxPooling) + self.assertEqual(max_pooling.kernel_size.init, (2, 2)) + self.assertEqual(max_pooling.stride.init, (2, 2)) + self.assertEqual(max_pooling.padding.init, (0, 0)) + + def test_invalid_shape_in_throws_exception(self): + with self.assertRaises(ValueError): + MaxPooling(shape_in=(240, 180, 1, 1), + kernel_size=2) + + def test_invalid_kernel_size_throws_exception(self): + with self.assertRaises(ValueError): + MaxPooling(shape_in=(240, 180, 1), + kernel_size=(2, 2, 1)) + + def test_invalid_stride_throws_exception(self): + with self.assertRaises(ValueError): + MaxPooling(shape_in=(240, 180, 1), + kernel_size=2, + stride=(2, 2, 1)) + + def test_invalid_padding_throws_exception(self): + with self.assertRaises(ValueError): + MaxPooling(shape_in=(240, 180, 1), + kernel_size=2, + padding=(0, 0, 0)) + + def test_negative_shape_in_element_throws_exception(self): + with self.assertRaises(ValueError): + MaxPooling(shape_in=(-240, 180, 1), + kernel_size=2) + + def test_negative_kernel_size_throws_exception(self): + with self.assertRaises(ValueError): + MaxPooling(shape_in=(240, 180, 1), + kernel_size=-1) + + def test_negative_stride_throws_exception(self): + with self.assertRaises(ValueError): + MaxPooling(shape_in=(240, 180, 1), + kernel_size=2, + stride=-1) + + def test_negative_padding_throws_exception(self): + with self.assertRaises(ValueError): + MaxPooling(shape_in=(240, 180, 1), + kernel_size=2, + padding=-1) From 34e752a6ac4f31f2f92d5ba1d963a7f6a4e7270a Mon Sep 17 00:00:00 2001 From: gkarray Date: Wed, 7 Dec 2022 22:56:25 +0100 Subject: [PATCH 17/32] polished docstrings, split tests --- .../models.py | 8 +- .../process.py | 18 +- .../proc/event_data/events_to_frame/models.py | 27 +- .../event_data/events_to_frame/process.py | 59 +++- .../{aedat_data_loader.py => aedat_stream.py} | 13 +- src/lava/proc/event_temp/flattening.py | 63 ---- src/lava/proc/max_pooling/models.py | 39 ++- src/lava/proc/max_pooling/process.py | 71 ++++- .../event_data/binary_to_unary/__init__.py | 0 ...ry_to_unary_polarity.py => test_models.py} | 74 +---- .../binary_to_unary/test_process.py | 32 ++ .../event_data/event_temp/test_flattening.py | 189 ------------ .../event_data/events_to_frame/__init__.py | 0 .../event_data/events_to_frame/test_models.py | 176 +++++++++++ .../events_to_frame/test_process.py | 60 ++++ tests/lava/proc/event_data/io/__init__.py | 0 ...at_data_loader.py => test_aedat_stream.py} | 113 ++++--- .../lava/proc/event_data/io/test_dv_stream.py | 2 +- .../lava/proc/event_data/test_integration.py | 87 +++--- .../proc/event_data/to_frame/test_to_frame.py | 275 ------------------ tests/lava/proc/max_pooling/test_models.py | 23 +- tests/lava/proc/max_pooling/test_process.py | 4 + 22 files changed, 590 insertions(+), 743 deletions(-) rename src/lava/proc/event_data/{binary_to_unary => 
binary_to_unary_polarity}/models.py (86%) rename src/lava/proc/event_data/{binary_to_unary => binary_to_unary_polarity}/process.py (65%) rename src/lava/proc/event_data/io/{aedat_data_loader.py => aedat_stream.py} (93%) delete mode 100644 src/lava/proc/event_temp/flattening.py create mode 100644 tests/lava/proc/event_data/binary_to_unary/__init__.py rename tests/lava/proc/event_data/binary_to_unary/{test_binary_to_unary_polarity.py => test_models.py} (64%) create mode 100644 tests/lava/proc/event_data/binary_to_unary/test_process.py delete mode 100644 tests/lava/proc/event_data/event_temp/test_flattening.py create mode 100644 tests/lava/proc/event_data/events_to_frame/__init__.py create mode 100644 tests/lava/proc/event_data/events_to_frame/test_models.py create mode 100644 tests/lava/proc/event_data/events_to_frame/test_process.py create mode 100644 tests/lava/proc/event_data/io/__init__.py rename tests/lava/proc/event_data/io/{test_aedat_data_loader.py => test_aedat_stream.py} (78%) delete mode 100644 tests/lava/proc/event_data/to_frame/test_to_frame.py diff --git a/src/lava/proc/event_data/binary_to_unary/models.py b/src/lava/proc/event_data/binary_to_unary_polarity/models.py similarity index 86% rename from src/lava/proc/event_data/binary_to_unary/models.py rename to src/lava/proc/event_data/binary_to_unary_polarity/models.py index 29aa87707..2632c6b9a 100644 --- a/src/lava/proc/event_data/binary_to_unary/models.py +++ b/src/lava/proc/event_data/binary_to_unary_polarity/models.py @@ -10,19 +10,19 @@ from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel -from lava.proc.event_data.binary_to_unary.process import BinaryToUnaryPolarity +from lava.proc.event_data.binary_to_unary_polarity.process \ + import BinaryToUnaryPolarity @implements(proc=BinaryToUnaryPolarity, protocol=LoihiProtocol) @requires(CPU) -class BinaryToUnaryPolarityPM(PyLoihiProcessModel): +class PyBinaryToUnaryPolarityPM(PyLoihiProcessModel): """PyLoihiProcessModel implementing the BinaryToUnaryPolarity Process. Transforms event-based data with binary polarity (0 for negative events, 1 for positive events) coming from its in_port to unary polarity (1 for negative and positive events) and sends it through its out_port. """ - in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) @@ -35,7 +35,7 @@ def run_spk(self) -> None: @staticmethod def _encode(data: np.ndarray) -> np.ndarray: - """Validate that a given shape is of the right format (max_num_events, ) + """Transform event-based data with binary polarity to unary polarity. Parameters ---------- diff --git a/src/lava/proc/event_data/binary_to_unary/process.py b/src/lava/proc/event_data/binary_to_unary_polarity/process.py similarity index 65% rename from src/lava/proc/event_data/binary_to_unary/process.py rename to src/lava/proc/event_data/binary_to_unary_polarity/process.py index b669f116d..0e1ffd1f5 100644 --- a/src/lava/proc/event_data/binary_to_unary/process.py +++ b/src/lava/proc/event_data/binary_to_unary_polarity/process.py @@ -15,7 +15,7 @@ class BinaryToUnaryPolarity(AbstractProcess): Parameters ---------- - shape : tuple + shape : tuple(int) Shape of InPort and OutPort. 
""" def __init__(self, @@ -32,17 +32,21 @@ def __init__(self, @staticmethod def _validate_shape(shape: ty.Tuple[int]) -> None: - """Validate that a given shape is of the right format (max_num_events, ) + """Validate that a given shape is of the form (max_num_events, ) where + max_num_events is strictly positive. Parameters ---------- - shape : tuple + shape : tuple(int) Shape to validate. """ if len(shape) != 1: - raise ValueError(f"Shape should be of the form (max_num_events, )." - f"{shape} was given.") + raise ValueError(f"Expected shape to be of the form " + f"(max_num_events, ). " + f"Found {shape=}.") if shape[0] <= 0: - raise ValueError(f"Max number of events should be positive." - f"{shape} was given.") + raise ValueError(f"Expected max number of events " + f"(first element of shape) to be strictly " + f"positive. " + f"Found {shape=}.") diff --git a/src/lava/proc/event_data/events_to_frame/models.py b/src/lava/proc/event_data/events_to_frame/models.py index 805976774..bb6b1ecc1 100644 --- a/src/lava/proc/event_data/events_to_frame/models.py +++ b/src/lava/proc/event_data/events_to_frame/models.py @@ -15,7 +15,12 @@ @implements(proc=EventsToFrame, protocol=LoihiProtocol) @requires(CPU) -class EventsToFramePM(PyLoihiProcessModel): +class PyEventsToFramePM(PyLoihiProcessModel): + """PyLoihiProcessModel implementing the EventsToFrame Process. + + Transforms a collection of (sparse) events with unary or binary polarity + into a (dense) frame of shape (W, H, 1) or (W, H, 2). + """ in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) @@ -27,6 +32,26 @@ def run_spk(self) -> None: self.out_port.send(dense_data) def _transform(self, data: np.ndarray, indices: np.ndarray) -> np.ndarray: + """Transform collection of sparsely represented events into a densely + represented frame of events. + + (1) If output shape is (W, H, 1), input is assumed to be unary. + (2) If output shape is (W, H, 2), input is assumed to be binary. + Negative events are represented by 1s in first channel. + Positive events are represented by 1s in second channel. + + Parameters + ---------- + data : ndarray + Array of events. + indices : ndarray + Array of event indices. + + Returns + ---------- + result : ndarray + Frame of events. + """ shape_out = self.out_port.shape dense_data = np.zeros(shape_out) diff --git a/src/lava/proc/event_data/events_to_frame/process.py b/src/lava/proc/event_data/events_to_frame/process.py index f28425195..b80da3f5d 100644 --- a/src/lava/proc/event_data/events_to_frame/process.py +++ b/src/lava/proc/event_data/events_to_frame/process.py @@ -9,6 +9,22 @@ class EventsToFrame(AbstractProcess): + """Process that transforms a collection of (sparse) events into a (dense) + frame. + + Output shape can be either (W, H, 1) or (W, H, 2). + (1) If output shape is (W, H, 1), input is assumed to be unary. + (2) If output shape is (W, H, 2), input is assumed to be binary. + Negative events are represented by 1s in first channel. + Positive events are represented by 1s in second channel. + + Parameters + ---------- + shape_in : tuple(int) + Shape of InPort. + shape_out : tuple(int, int, int) + Shape of OutPort. 
+ """ def __init__(self, *, shape_in: ty.Tuple[int], @@ -24,26 +40,49 @@ def __init__(self, self.in_port = InPort(shape=shape_in) self.out_port = OutPort(shape=shape_out) - # TODO: Re-write error messages @staticmethod def _validate_shape_in(shape_in: ty.Tuple[int]) -> None: + """Validate that a given shape is of the form (max_num_events, ) where + max_num_events is strictly positive. + + Parameters + ---------- + shape_in : tuple(int) + Shape to validate. + """ if len(shape_in) != 1: - raise ValueError(f"Shape of the InPort should be (n,). " - f"{shape_in} was given.") + raise ValueError(f"Expected shape_in to be of the form " + f"(max_num_events, ). " + f"Found {shape_in=}.") if shape_in[0] <= 0: - raise ValueError(f"Width of shape_in should be positive. {shape_in} given.") + raise ValueError(f"Expected max number of events " + f"(first element of shape_in) to be strictly " + f"positive. " + f"Found {shape_in=}.") - # TODO: Re-write error messages @staticmethod def _validate_shape_out(shape_out: ty.Tuple[int, int, int]) -> None: + """Validate that a given shape is of the form (W, H, C) where W and H + are strictly positive and C either 1 or 2. + + Parameters + ---------- + shape_out : tuple(int, int, int) + Shape to validate. + """ if not len(shape_out) == 3: - raise ValueError(f"shape_out should be 3 dimensional. {shape_out} given.") + raise ValueError(f"Expected shape_out to be 3D. " + f"Found {shape_out=}.") if not (shape_out[2] == 1 or shape_out[2] == 2): - raise ValueError(f"Depth of the shape_out argument should be an integer and equal to 2. " - f"{shape_out} given.") + raise ValueError(f"Expected number of channels " + f"(third element of shape_out) to be either " + f"1 or 2. " + f"Found {shape_out=}.") if shape_out[0] <= 0 or shape_out[1] <= 0: - raise ValueError(f"Width and height of the shape_out argument should be positive. " - f"{shape_out} given.") + raise ValueError(f"Expected width and height " + f"(first and second elements of shape_out) to be " + f"strictly positive. " + f"Found {shape_out=}.") diff --git a/src/lava/proc/event_data/io/aedat_data_loader.py b/src/lava/proc/event_data/io/aedat_stream.py similarity index 93% rename from src/lava/proc/event_data/io/aedat_data_loader.py rename to src/lava/proc/event_data/io/aedat_stream.py index 011933cd2..6f8c2b0da 100644 --- a/src/lava/proc/event_data/io/aedat_data_loader.py +++ b/src/lava/proc/event_data/io/aedat_stream.py @@ -6,7 +6,6 @@ import numpy as np import os.path import typing as ty -import warnings from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import OutPort @@ -19,7 +18,7 @@ from lava.utils.events import sub_sample -class AedatDataLoader(AbstractProcess): +class AedatStream(AbstractProcess): """Process that reads event-based data from an aedat4 file. This process outputs a sparse tensor of the event data stream, meaning @@ -87,9 +86,9 @@ def _validate_shape_out(shape_out: ty.Tuple[int]) -> None: f"{shape_out} was given.") -@implements(proc=AedatDataLoader, protocol=LoihiProtocol) +@implements(proc=AedatStream, protocol=LoihiProtocol) @requires(CPU) -class AedatDataLoaderPM(PyLoihiProcessModel): +class AedatStreamPM(PyLoihiProcessModel): """ Implementation of the Aedat Data Loader process on Loihi, with sparse representation of events. 
@@ -120,7 +119,8 @@ def run_spk(self) -> None: # If we have more data than our shape allows, subsample if data.shape[0] > self._shape_out[0]: - data, indices = sub_sample(data, indices, self._shape_out[0], self._random_rng) + data, indices = sub_sample(data, indices, + self._shape_out[0], self._random_rng) self.out_port.send(data, indices) @@ -147,9 +147,8 @@ def _init_aedat_file(self) -> None: self._file = AedatFile(file_name=self._file_path) self._stream = self._file["events"].numpy() - # TODO: look into the type of "events" def _encode_data_and_indices(self, - events: ty.Dict) \ + events: np.ndarray) \ -> ty.Tuple[np.ndarray, np.ndarray]: """ Extracts the polarity data, and x and y indices from the given diff --git a/src/lava/proc/event_temp/flattening.py b/src/lava/proc/event_temp/flattening.py deleted file mode 100644 index 4894890a1..000000000 --- a/src/lava/proc/event_temp/flattening.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ - -import math -import numpy as np -import typing as ty - -from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.resources import CPU -from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.py.model import PyLoihiProcessModel - - -class Flattening(AbstractProcess): - def __init__(self, - *, - shape_in: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]], - **kwargs) -> None: - super().__init__(shape_in=shape_in, - **kwargs) - - self._validate_shape_in(shape_in) - - shape_out = (math.prod(shape_in),) - - self.in_port = InPort(shape_in) - self.out_port = OutPort(shape_out) - - @staticmethod - def _validate_shape_in(shape_in: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]]) -> None: - if not (len(shape_in) == 2 or len(shape_in) == 3): - raise ValueError(f"shape_in should be 2 or 3 dimensional. " - f"{shape_in} was given.") - - if len(shape_in) == 3: - if shape_in[2] != 2: - raise ValueError(f"Third dimension of shape_in should be " - f"equal to 2." - f"{shape_in} was given.") - - if shape_in[0] <= 0 or shape_in[1] <= 0: - raise ValueError(f"Width and height of shape_in should be positive." 
- f"{shape_in} was given.") - - -@implements(proc=Flattening, protocol=LoihiProtocol) -@requires(CPU) -class FlatteningPM(PyLoihiProcessModel): - in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) - out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) - - def __init__(self, proc_params: dict) -> None: - super().__init__(proc_params) - self._shape_in = proc_params["shape_in"] - - def run_spk(self) -> None: - data = self.in_port.recv() - self.out_port.send(data.flatten()) diff --git a/src/lava/proc/max_pooling/models.py b/src/lava/proc/max_pooling/models.py index 55832200a..53a7133f7 100644 --- a/src/lava/proc/max_pooling/models.py +++ b/src/lava/proc/max_pooling/models.py @@ -13,12 +13,15 @@ from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.proc.max_pooling.process import MaxPooling -from lava.proc.conv import utils @implements(proc=MaxPooling, protocol=LoihiProtocol) @requires(CPU) -class MaxPoolingPM(PyLoihiProcessModel): +class PyMaxPoolingPM(PyLoihiProcessModel): + """PyLoihiProcessModel implementing the MaxPooling Process. + + Applies the max-pooling operation on incoming data. + """ in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) @@ -34,6 +37,18 @@ def run_spk(self) -> None: self.out_port.send(max_pooled_data) def _max_pooling(self, data: np.ndarray) -> np.ndarray: + """Applies the max-pooling operation on data with shape (W, H, C). + + Parameters + ---------- + data : np.ndarray + Incoming data. + + Returns + ---------- + result : np.ndarray + 3D result after max-pooling. + """ result = np.zeros(self.out_port.shape) for channel in range(self.out_port.shape[-1]): @@ -52,6 +67,26 @@ def _max_pooling_2d(data: np.ndarray, kernel_size: np.ndarray, stride: np.ndarray, padding: np.ndarray) -> np.ndarray: + """Applies the max-pooling operation on data with shape (W, H). + + Parameters + ---------- + data : np.ndarray + Data with shape (W, H). + output_shape : tuple(int, int) + Output shape. + kernel_size : np.ndarray + Max-pooling kernel size. + stride : np.ndarray + Max-pooling stride. + padding : np.ndarray + Padding to apply. + + Returns + ---------- + result : np.ndarray + 2D result after max-pooling. + """ padded_data = np.pad(data, (padding[0], padding[1]), mode='constant').copy() diff --git a/src/lava/proc/max_pooling/process.py b/src/lava/proc/max_pooling/process.py index 30522f274..caf5e2c1d 100644 --- a/src/lava/proc/max_pooling/process.py +++ b/src/lava/proc/max_pooling/process.py @@ -11,13 +11,27 @@ class MaxPooling(AbstractProcess): + """Process that applies the max-pooling operation on incoming data. + + Parameters + ---------- + shape_in : tuple(int, int, int) + Shape of InPort. + kernel_size : int or tuple(int, int) + Size of the max-pooling kernel. + stride : int or tuple(int, int), optional + Stride size. Default is None. + If not given, use kernel_size as max-pooling stride. + padding : int or tuple(int, int), optional + Padding size. Default is 0. 
+ """ def __init__( self, *, shape_in: ty.Tuple[int, int, int], kernel_size: ty.Union[int, ty.Tuple[int, int]], stride: ty.Optional[ty.Union[int, ty.Tuple[int, int]]] = None, - padding: ty.Optional[ty.Union[int, ty.Tuple[int, int]]] = (0, 0), + padding: ty.Optional[ty.Union[int, ty.Tuple[int, int]]] = 0, **kwargs) -> None: super().__init__(shape_in=shape_in, kernel_size=kernel_size, @@ -53,27 +67,64 @@ def __init__( @staticmethod def _validate_shape_in(shape_in: ty.Tuple[int, int, int]) -> None: + """Validate that a given shape is of the form (W, H, C) where W and H + are strictly positive. + + Parameters + ---------- + shape_in : tuple + Shape to validate. + """ if not len(shape_in) == 3: - raise ValueError(f"shape_in should be 3 dimensional. {shape_in} given.") + raise ValueError(f"Expected shape_in to be 3D. " + f"Found {shape_in=}.") if shape_in[0] <= 0 or shape_in[1] <= 0: - raise ValueError(f"Width and height of shape_in should be positive." - f"{shape_in} given.") + raise ValueError(f"Expected width and height " + f"(first and second elements of shape_in) to be " + f"strictly positive. " + f"Found {shape_in=}.") @staticmethod def _validate_kernel_size(kernel_size: ty.Tuple[int, int]) -> None: + """Validate that a given kernel size (W, H) has W and H strictly + positive. + + Parameters + ---------- + kernel_size : tuple + Kernel size to validate. + """ if kernel_size[0] <= 0 or kernel_size[1] <= 0: - raise ValueError(f"Kernel size elements should be strictly positive." - f"{kernel_size=} found.") + raise ValueError(f"Expected kernel_size elements to be strictly " + f"positive. " + f"Found {kernel_size=}.") @staticmethod def _validate_stride(stride: ty.Tuple[int, int]) -> None: + """Validate that a given stride (W, H) has W and H strictly + positive. + + Parameters + ---------- + stride : tuple + Stride to validate. + """ if stride[0] <= 0 or stride[1] <= 0: - raise ValueError(f"Stride elements should be strictly positive." - f"{stride=} found.") + raise ValueError(f"Expected stride elements to be strictly " + f"positive. " + f"Found {stride=}.") @staticmethod def _validate_padding(padding: ty.Tuple[int, int]) -> None: + """Validate that a given padding (W, H) has W and H strictly + positive. + + Parameters + ---------- + padding : tuple + Padding to validate. + """ if padding[0] < 0 or padding[1] < 0: - raise ValueError(f"Padding elements should be positive." - f"{padding=} found.") \ No newline at end of file + raise ValueError(f"Expected padding elements to be positive. 
" + f"Found {padding=} .") diff --git a/tests/lava/proc/event_data/binary_to_unary/__init__.py b/tests/lava/proc/event_data/binary_to_unary/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/lava/proc/event_data/binary_to_unary/test_binary_to_unary_polarity.py b/tests/lava/proc/event_data/binary_to_unary/test_models.py similarity index 64% rename from tests/lava/proc/event_data/binary_to_unary/test_binary_to_unary_polarity.py rename to tests/lava/proc/event_data/binary_to_unary/test_models.py index fc85904eb..b25141c2b 100644 --- a/tests/lava/proc/event_data/binary_to_unary/test_binary_to_unary_polarity.py +++ b/tests/lava/proc/event_data/binary_to_unary/test_models.py @@ -17,16 +17,19 @@ from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg -from lava.proc.event_data.binary_to_unary.models import BinaryToUnaryPolarity, BinaryToUnaryPolarityPM +from lava.proc.event_data.binary_to_unary_polarity.process \ + import BinaryToUnaryPolarity +from lava.proc.event_data.binary_to_unary_polarity.models \ + import PyBinaryToUnaryPolarityPM class RecvSparse(AbstractProcess): - """ - Process that receives arbitrary sparse data. + """Process that receives arbitrary sparse data. Parameters ---------- - shape: tuple, shape of the process + shape: tuple + Shape of the InPort and Vars. """ def __init__(self, shape: ty.Tuple[int]) -> None: @@ -41,15 +44,14 @@ def __init__(self, @implements(proc=RecvSparse, protocol=LoihiProtocol) @requires(CPU) class PyRecvSparsePM(PyLoihiProcessModel): + """Receives sparse data from PyInPort and stores a padded version of + received data and indices in Vars.""" in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) data: np.ndarray = LavaPyType(np.ndarray, int) idx: np.ndarray = LavaPyType(np.ndarray, int) def run_spk(self) -> None: - """ - Receives the data and pads with zeros to enable access with get() - """ data, idx = self.in_port.recv() self.data = np.pad(data, @@ -61,12 +63,12 @@ def run_spk(self) -> None: class SendSparse(AbstractProcess): - """ - Process that sends arbitrary sparse data. + """Process that sends arbitrary sparse data. Parameters ---------- - shape: tuple, shape of the process + shape: tuple + Shape of the OutPort. 
""" def __init__(self, shape: ty.Tuple[int], @@ -80,6 +82,7 @@ def __init__(self, @implements(proc=SendSparse, protocol=LoihiProtocol) @requires(CPU) class PySendSparsePM(PyLoihiProcessModel): + """Sends sparse data to PyOutPort.""" out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) def __init__(self, proc_params): @@ -94,35 +97,12 @@ def run_spk(self) -> None: self.out_port.send(data, idx) -class TestProcessBinaryToUnaryPolarity(unittest.TestCase): - def test_init(self): - """Tests instantiation of BinaryToUnaryPolarity.""" - converter = BinaryToUnaryPolarity(shape=(43200,)) - - self.assertIsInstance(converter, BinaryToUnaryPolarity) - self.assertEqual(converter.proc_params["shape"], (43200,)) - - def test_invalid_shape_throws_exception(self): - """Tests whether a shape argument with an invalid shape throws an exception.""" - with(self.assertRaises(ValueError)): - BinaryToUnaryPolarity(shape=(240, 180)) - - def test_negative_size_throws_exception(self): - """Tests whether a shape argument with a negative size throws an exception.""" - with(self.assertRaises(ValueError)): - BinaryToUnaryPolarity(shape=(-43200,)) - -# TODO: add doc strings class TestProcessModelBinaryToUnaryPolarity(unittest.TestCase): def test_init(self): """Tests instantiation of the BinaryToUnary process model.""" - proc_params = { - "shape": (10,) - } + pm = PyBinaryToUnaryPolarityPM() - pm = BinaryToUnaryPolarityPM(proc_params) - - self.assertIsInstance(pm, BinaryToUnaryPolarityPM) + self.assertIsInstance(pm, PyBinaryToUnaryPolarityPM) def test_binary_to_unary_polarity_encoding(self): """Tests whether the encoding from binary to unary works correctly.""" @@ -141,11 +121,9 @@ def test_binary_to_unary_polarity_encoding(self): send_sparse.out_port.connect(binary_to_unary_encoder.in_port) binary_to_unary_encoder.out_port.connect(recv_sparse.in_port) - # Run parameters run_cfg = Loihi1SimCfg() run_cnd = RunSteps(num_steps=1) - # Running send_sparse.run(condition=run_cnd, run_cfg=run_cfg) sent_and_received_data = \ @@ -153,7 +131,6 @@ def test_binary_to_unary_polarity_encoding(self): sent_and_received_indices = \ recv_sparse.idx.get()[:expected_indices.shape[0]] - # Stopping send_sparse.stop() np.testing.assert_equal(sent_and_received_data, @@ -161,27 +138,6 @@ def test_binary_to_unary_polarity_encoding(self): np.testing.assert_equal(sent_and_received_indices, expected_indices) - # TODO: I guess not needed? should any edge cases be treated? 
- # def test_run(self): - # data = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0]) - # indices = np.array([1, 5, 4, 3, 3, 2, 0, 1, 0]) - # - # send_sparse = SendSparse(shape=(10,), data=data, indices=indices) - # binary_to_unary_encoder = BinaryToUnaryPolarity(shape=(10,)) - # - # send_sparse.out_port.connect(binary_to_unary_encoder.in_port) - # - # # Run parameters - # run_cfg = Loihi1SimCfg() - # run_cnd = RunSteps(num_steps=1) - # - # # Running - # binary_to_unary_encoder.run(condition=run_cnd, run_cfg=run_cfg) - # - # binary_to_unary_encoder.stop() - # - # self.assertFalse(binary_to_unary_encoder.runtime._is_running) - if __name__ == '__main__': unittest.main() diff --git a/tests/lava/proc/event_data/binary_to_unary/test_process.py b/tests/lava/proc/event_data/binary_to_unary/test_process.py new file mode 100644 index 000000000..12d05f995 --- /dev/null +++ b/tests/lava/proc/event_data/binary_to_unary/test_process.py @@ -0,0 +1,32 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest + +from lava.proc.event_data.binary_to_unary_polarity.process \ + import BinaryToUnaryPolarity + + +class TestProcessBinaryToUnaryPolarity(unittest.TestCase): + def test_init(self): + """Tests instantiation of BinaryToUnaryPolarity.""" + converter = BinaryToUnaryPolarity(shape=(43200,)) + + self.assertIsInstance(converter, BinaryToUnaryPolarity) + + def test_invalid_shape_throws_exception(self): + """Tests whether a shape argument with an invalid shape + throws an exception.""" + with(self.assertRaises(ValueError)): + BinaryToUnaryPolarity(shape=(240, 180)) + + def test_negative_size_throws_exception(self): + """Tests whether a shape argument with a negative size + throws an exception.""" + with(self.assertRaises(ValueError)): + BinaryToUnaryPolarity(shape=(-43200,)) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/proc/event_data/event_temp/test_flattening.py b/tests/lava/proc/event_data/event_temp/test_flattening.py deleted file mode 100644 index 104993dc6..000000000 --- a/tests/lava/proc/event_data/event_temp/test_flattening.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ - -import numpy as np -import typing as ty -import unittest - -from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.process.variable import Var -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.resources import CPU -from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.py.model import PyLoihiProcessModel -from lava.magma.core.run_conditions import RunSteps -from lava.magma.core.run_configs import Loihi1SimCfg -from lava.proc.event_data.event_pre_processor.dense_to_dense.flattening import Flattening, FlatteningPM - -# TODO: add doc strings for these processes -class RecvDense(AbstractProcess): - def __init__(self, - shape: ty.Tuple[int]) -> None: - super().__init__(shape=shape) - - self.in_port = InPort(shape=shape) - - self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) - - -@implements(proc=RecvDense, protocol=LoihiProtocol) -@requires(CPU) -class PyRecvDensePM(PyLoihiProcessModel): - in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) - - 
data: np.ndarray = LavaPyType(np.ndarray, int) - - def run_spk(self) -> None: - data = self.in_port.recv() - - self.data = data - - -class SendDense(AbstractProcess): - def __init__(self, - shape: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]], - data: np.ndarray) -> None: - super().__init__(shape=shape, data=data) - - self.out_port = OutPort(shape=shape) - - -@implements(proc=SendDense, protocol=LoihiProtocol) -@requires(CPU) -class PySendDensePM(PyLoihiProcessModel): - out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) - - def __init__(self, proc_params): - super().__init__(proc_params) - self._data = proc_params["data"] - - def run_spk(self) -> None: - data = self._data - - self.out_port.send(data) - -class TestProcessFlattening(unittest.TestCase): - def test_init(self): - """Tests instantiation of DownSamplingDense.""" - flattener = Flattening(shape_in=(240, 180)) - - self.assertIsInstance(flattener, Flattening) - self.assertEqual(flattener.proc_params["shape_in"], (240, 180)) - - def test_negative_width_or_height_throws_exception(self): - """Tests whether an exception is thrown when a negative width or height for the shape_in argument is given.""" - with(self.assertRaises(ValueError)): - Flattening(shape_in=(-240, 180)) - - with(self.assertRaises(ValueError)): - Flattening(shape_in=(240, -180)) - - def test_invalid_shape_throws_exception(self): - """Tests whether an exception is thrown when a 1d or 4d value for the shape_in argument is given.""" - with(self.assertRaises(ValueError)): - Flattening(shape_in=(240,)) - - with(self.assertRaises(ValueError)): - Flattening(shape_in=(240, 180, 2, 1)) - - def test_third_dimension_not_2_throws_exception(self): - """Tests whether an exception is thrown if the value of the 3rd dimension for the shape_in argument is not 2.""" - with(self.assertRaises(ValueError)): - Flattening(shape_in=(240, 180, 1)) - -# TODO: add doc strings -class TestProcessModelFlattening(unittest.TestCase): - def test_init(self): - """Tests instantiation of the Flattening process model""" - proc_params = { - "shape_in": (240, 180) - } - - pm = FlatteningPM(proc_params) - - self.assertIsInstance(pm, FlatteningPM) - self.assertEqual(pm._shape_in, proc_params["shape_in"]) - - # TODO: can probably be deleted - def test_run(self): - data = np.zeros((8, 8)) - - send_dense = SendDense(shape=(8, 8), data=data) - flattener = Flattening(shape_in=(8, 8)) - - send_dense.out_port.connect(flattener.in_port) - - # Run parameters - num_steps = 1 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=num_steps) - - # Running - flattener.run(condition=run_cnd, run_cfg=run_cfg) - - # Stopping - flattener.stop() - - self.assertFalse(flattener.runtime._is_running) - - def test_flattening_2d(self): - data = np.zeros((8, 8)) - - expected_data = np.zeros((64,)) - - send_dense = SendDense(shape=(8, 8), data=data) - flattener = Flattening(shape_in=(8, 8)) - recv_dense = RecvDense(shape=(64,)) - - send_dense.out_port.connect(flattener.in_port) - flattener.out_port.connect(recv_dense.in_port) - - # Run parameters - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=1) - - # Running - send_dense.run(condition=run_cnd, run_cfg=run_cfg) - - sent_and_received_data = \ - recv_dense.data.get() - - send_dense.stop() - - np.testing.assert_equal(sent_and_received_data, - expected_data) - - def test_flattening_3d(self): - data = np.zeros((8, 8, 2)) - - expected_data = np.zeros((128,)) - - send_dense = SendDense(shape=(8, 8, 2), data=data) - flattener = Flattening(shape_in=(8, 8, 2)) - 
recv_dense = RecvDense(shape=(128,)) - - send_dense.out_port.connect(flattener.in_port) - flattener.out_port.connect(recv_dense.in_port) - - # Run parameters - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=1) - - # Running - send_dense.run(condition=run_cnd, run_cfg=run_cfg) - - sent_and_received_data = \ - recv_dense.data.get() - - send_dense.stop() - - np.testing.assert_equal(sent_and_received_data, - expected_data) - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/lava/proc/event_data/events_to_frame/__init__.py b/tests/lava/proc/event_data/events_to_frame/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/lava/proc/event_data/events_to_frame/test_models.py b/tests/lava/proc/event_data/events_to_frame/test_models.py new file mode 100644 index 000000000..33950d239 --- /dev/null +++ b/tests/lava/proc/event_data/events_to_frame/test_models.py @@ -0,0 +1,176 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np +import typing as ty +import unittest + +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.variable import Var +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.run_configs import Loihi1SimCfg +from lava.proc.event_data.events_to_frame.process import EventsToFrame +from lava.proc.event_data.events_to_frame.models import PyEventsToFramePM + + +class RecvDense(AbstractProcess): + """Process that receives arbitrary dense data. + + Parameters + ---------- + shape: tuple + Shape of the InPort and Var. + """ + def __init__(self, + shape: ty.Union[ + ty.Tuple[int, int], ty.Tuple[int, int, int]]) -> None: + super().__init__(shape=shape) + + self.in_port = InPort(shape=shape) + + self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) + + +@implements(proc=RecvDense, protocol=LoihiProtocol) +@requires(CPU) +class PyRecvDensePM(PyLoihiProcessModel): + """Receives dense data from PyInPort and stores it in a Var.""" + in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + + data: np.ndarray = LavaPyType(np.ndarray, int) + + def run_spk(self) -> None: + data = self.in_port.recv() + + self.data = data + + +class SendSparse(AbstractProcess): + """Process that sends arbitrary sparse data. + + Parameters + ---------- + shape: tuple + Shape of the OutPort. 
+ """ + def __init__(self, + shape: ty.Tuple[int], + data: np.ndarray, + indices: np.ndarray) -> None: + super().__init__(shape=shape, data=data, indices=indices) + + self.out_port = OutPort(shape=shape) + + +@implements(proc=SendSparse, protocol=LoihiProtocol) +@requires(CPU) +class PySendSparsePM(PyLoihiProcessModel): + """Sends sparse data to PyOutPort.""" + out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) + + def __init__(self, proc_params): + super().__init__(proc_params) + self._data = proc_params["data"] + self._indices = proc_params["indices"] + + def run_spk(self) -> None: + data = self._data + idx = self._indices + + self.out_port.send(data, idx) + + +class TestProcessModelEventsEventsToFrame(unittest.TestCase): + def test_init(self): + """Tests instantiation of the SparseToDense process model.""" + pm = PyEventsToFramePM() + + self.assertIsInstance(pm, PyEventsToFramePM) + + def test_third_dimension_1(self): + data = np.array([1, 1, 1, 1, 1, 1]) + xs = [0, 1, 2, 1, 2, 4] + ys = [0, 2, 1, 5, 7, 7] + indices = np.ravel_multi_index((xs, ys), (8, 8)) + + expected_data = np.zeros((8, 8, 1)) + expected_data[0, 0, 0] = 1 + expected_data[1, 2, 0] = 1 + expected_data[2, 1, 0] = 1 + + expected_data[1, 5, 0] = 1 + expected_data[2, 7, 0] = 1 + + expected_data[4, 7, 0] = 1 + + send_sparse = SendSparse(shape=(10,), data=data, indices=indices) + to_frame = EventsToFrame(shape_in=(10,), + shape_out=(8, 8, 1)) + recv_dense = RecvDense(shape=(8, 8, 1)) + + send_sparse.out_port.connect(to_frame.in_port) + to_frame.out_port.connect(recv_dense.in_port) + + num_steps = 1 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=num_steps) + + to_frame.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + to_frame.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + def test_third_dimension_2(self): + data = np.array([1, 0, 1, 0, 1, 0]) + xs = [0, 1, 2, 1, 2, 4] + ys = [0, 2, 1, 5, 7, 7] + indices = np.ravel_multi_index((xs, ys), (8, 8)) + + expected_data = np.zeros((8, 8, 2)) + expected_data[0, 0, 1] = 1 + expected_data[1, 2, 0] = 1 + expected_data[2, 1, 1] = 1 + + expected_data[1, 5, 0] = 1 + expected_data[2, 7, 1] = 1 + + expected_data[4, 7, 0] = 1 + + send_sparse = SendSparse(shape=(10,), data=data, indices=indices) + to_frame = EventsToFrame(shape_in=(10,), + shape_out=(8, 8, 2)) + recv_dense = RecvDense(shape=(8, 8, 2)) + + send_sparse.out_port.connect(to_frame.in_port) + to_frame.out_port.connect(recv_dense.in_port) + + num_steps = 1 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=num_steps) + + to_frame.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_dense.data.get() + + to_frame.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/proc/event_data/events_to_frame/test_process.py b/tests/lava/proc/event_data/events_to_frame/test_process.py new file mode 100644 index 000000000..0c5c97f83 --- /dev/null +++ b/tests/lava/proc/event_data/events_to_frame/test_process.py @@ -0,0 +1,60 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest + +from lava.proc.event_data.events_to_frame.process import EventsToFrame + + +class TestProcessEventsToFrame(unittest.TestCase): + def test_init(self): + """Tests instantiation of SparseToDense for a 3D output.""" + to_frame = EventsToFrame(shape_in=(43200,), + 
shape_out=(240, 180, 1)) + + self.assertIsInstance(to_frame, EventsToFrame) + + def test_invalid_shape_in_throws_exception(self): + """Tests whether a shape_in argument that isn't (n,) throws + an exception.""" + with(self.assertRaises(ValueError)): + EventsToFrame(shape_in=(43200, 1), + shape_out=(240, 180, 1)) + + def test_invalid_shape_out_throws_exception(self): + """Tests whether an exception is thrown when a 1d or 4d value + for the shape_out argument is given.""" + with(self.assertRaises(ValueError)): + EventsToFrame(shape_in=(43200,), + shape_out=(240,)) + + with(self.assertRaises(ValueError)): + EventsToFrame(shape_in=(43200,), + shape_out=(240, 180, 3)) + + with(self.assertRaises(ValueError)): + EventsToFrame(shape_in=(43200,), + shape_out=(240, 180, 2, 1)) + + def test_negative_size_shape_in_throws_exception(self): + """Tests whether an exception is thrown when a negative integer for + the shape_in argument is given""" + with(self.assertRaises(ValueError)): + EventsToFrame(shape_in=(-43200,), + shape_out=(240, 180)) + + def test_negative_width_or_height_shape_out_throws_exception(self): + """Tests whether an exception is thrown when a negative width or height + for the shape_out argument is given""" + with(self.assertRaises(ValueError)): + EventsToFrame(shape_in=(43200,), + shape_out=(-240, 180)) + + with(self.assertRaises(ValueError)): + EventsToFrame(shape_in=(43200,), + shape_out=(240, -180)) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/proc/event_data/io/__init__.py b/tests/lava/proc/event_data/io/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/lava/proc/event_data/io/test_aedat_data_loader.py b/tests/lava/proc/event_data/io/test_aedat_stream.py similarity index 78% rename from tests/lava/proc/event_data/io/test_aedat_data_loader.py rename to tests/lava/proc/event_data/io/test_aedat_stream.py index 4f3307ac0..bf5a6a101 100644 --- a/tests/lava/proc/event_data/io/test_aedat_data_loader.py +++ b/tests/lava/proc/event_data/io/test_aedat_stream.py @@ -19,15 +19,16 @@ from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg -from lava.proc.event_data.io.aedat_data_loader import AedatDataLoader, AedatDataLoaderPM +from lava.proc.event_data.io.aedat_stream import AedatStream, AedatStreamPM + class RecvSparse(AbstractProcess): - """ - Process that receives arbitrary sparse data. + """Process that receives arbitrary sparse data. Parameters ---------- - shape: tuple, shape of the process + shape: tuple + Shape of the InPort and Vars. """ def __init__(self, shape: ty.Tuple[int]) -> None: @@ -42,15 +43,14 @@ def __init__(self, @implements(proc=RecvSparse, protocol=LoihiProtocol) @requires(CPU) class PyRecvSparsePM(PyLoihiProcessModel): + """Receives sparse data from PyInPort and stores a padded version of + received data and indices in Vars.""" in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) data: np.ndarray = LavaPyType(np.ndarray, int) idx: np.ndarray = LavaPyType(np.ndarray, int) def run_spk(self) -> None: - """ - Receives the data and pads with zeros to enable access with get(). - """ data, idx = self.in_port.recv() self.data = np.pad(data, @@ -59,15 +59,15 @@ def run_spk(self) -> None: pad_width=(0, self.in_port.shape[0] - data.shape[0])) -class TestProcessAedatDataLoader(unittest.TestCase): +class TestProcessAedatStream(unittest.TestCase): def test_init(self): """ - Tests instantiation of AedatDataLoader. 
+ Tests instantiation of AedatStream. """ - data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(43200,)) + data_loader = AedatStream(file_path="../dvs_recording.aedat4", + shape_out=(43200,)) - self.assertIsInstance(data_loader, AedatDataLoader) + self.assertIsInstance(data_loader, AedatStream) self.assertEqual(data_loader.proc_params["file_path"], "../dvs_recording.aedat4") self.assertEqual(data_loader.proc_params["shape_out"], (43200,)) @@ -78,38 +78,39 @@ def test_unsupported_file_extension_throws_exception(self): throws an exception. """ with(self.assertRaises(ValueError)): - AedatDataLoader(file_path="test_aedat_data_loader.py", - shape_out=(43200,)) + AedatStream(file_path="test_aedat_data_loader.py", + shape_out=(43200,)) def test_missing_file_throws_exception(self): """ Tests whether an exception is thrown when a specified file does not exist. """ with(self.assertRaises(FileNotFoundError)): - AedatDataLoader(file_path="missing_file.aedat4", - shape_out=(43200,)) + AedatStream(file_path="missing_file.aedat4", + shape_out=(43200,)) def test_invalid_shape_throws_exception(self): """ Tests whether a shape_out argument with an invalid shape throws an exception. """ with(self.assertRaises(ValueError)): - AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(240, 180)) + AedatStream(file_path="../dvs_recording.aedat4", + shape_out=(240, 180)) def test_negative_size_throws_exception(self): """ Tests whether a shape_out argument with a negative size throws an exception. """ with(self.assertRaises(ValueError)): - AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(-43200,)) + AedatStream(file_path="../dvs_recording.aedat4", + shape_out=(-43200,)) + # TODO: add doc strings -class TestProcessModelAedatDataLoader(unittest.TestCase): +class TestProcessModelAedatStream(unittest.TestCase): def test_init(self): """ - Tests instantiation of the AedatDataLoader process model. + Tests instantiation of the AedatStream process model. """ proc_params = { "file_path": "../dvs_recording.aedat4", @@ -117,9 +118,9 @@ def test_init(self): "seed_sub_sampling": 0 } - pm = AedatDataLoaderPM(proc_params) + pm = AedatStreamPM(proc_params) - self.assertIsInstance(pm, AedatDataLoaderPM) + self.assertIsInstance(pm, AedatStreamPM) self.assertEqual(pm._shape_out, proc_params["shape_out"]) self.assertIsInstance(pm._file, AedatFile) self.assertIsInstance(pm._stream, @@ -128,7 +129,7 @@ def test_init(self): def test_run_without_sub_sampling(self): """ - Tests whether running yields the expectde behavior, given that the + Tests whether running yields the expected behavior, given that the user parameters are all correct. 
""" data_history = [ @@ -150,19 +151,17 @@ def test_run_without_sub_sampling(self): seed_rng = 0 max_num_events = 15 - data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(max_num_events,), - seed_sub_sampling=seed_rng) + data_loader = AedatStream(file_path="../dvs_recording.aedat4", + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng) recv_sparse = RecvSparse(shape=(max_num_events,)) data_loader.out_port.connect(recv_sparse.in_port) - # Run parameters num_steps = 5 run_cfg = Loihi1SimCfg() run_cnd = RunSteps(num_steps=1) - # Running for i in range(num_steps): data_loader.run(condition=run_cnd, run_cfg=run_cfg) @@ -179,7 +178,6 @@ def test_run_without_sub_sampling(self): np.testing.assert_equal(sent_and_received_indices, expected_indices) - # Stopping data_loader.stop() def test_sub_sampling(self): @@ -206,19 +204,17 @@ def test_sub_sampling(self): seed_rng = 0 max_num_events = 10 - data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(max_num_events,), - seed_sub_sampling=seed_rng) + data_loader = AedatStream(file_path="../dvs_recording.aedat4", + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng) recv_sparse = RecvSparse(shape=(max_num_events,)) data_loader.out_port.connect(recv_sparse.in_port) - # Run parameters num_steps = 5 run_cfg = Loihi1SimCfg() run_cnd = RunSteps(num_steps=1) - # Running for i in range(num_steps): data_loader.run(condition=run_cnd, run_cfg=run_cfg) @@ -232,7 +228,6 @@ def test_sub_sampling(self): np.testing.assert_equal(sent_and_received_indices, expected_indices[i]) - # Stopping data_loader.stop() def test_sub_sampling_seed(self): @@ -262,12 +257,12 @@ def test_sub_sampling_seed(self): seed_rng_run_1 = 0 seed_rng_run_2 = 1 - data_loader_1 = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(max_num_events,), - seed_sub_sampling=seed_rng_run_1) - data_loader_2 = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(max_num_events,), - seed_sub_sampling=seed_rng_run_2) + data_loader_1 = AedatStream(file_path="../dvs_recording.aedat4", + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng_run_1) + data_loader_2 = AedatStream(file_path="../dvs_recording.aedat4", + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng_run_2) recv_sparse_1 = RecvSparse(shape=(max_num_events,)) recv_sparse_2 = RecvSparse(shape=(max_num_events,)) @@ -282,7 +277,7 @@ def test_sub_sampling_seed(self): for i in range(num_steps): data_loader_1.run(condition=run_cnd, run_cfg=run_cfg) - sent_and_received_indices_1.append\ + sent_and_received_indices_1.append \ (recv_sparse_1.idx.get()[:len(expected_indices_seed_1[i])]) np.testing.assert_equal(sent_and_received_indices_1, @@ -293,7 +288,7 @@ def test_sub_sampling_seed(self): for i in range(num_steps): data_loader_2.run(condition=run_cnd, run_cfg=run_cfg) - sent_and_received_indices_2.append\ + sent_and_received_indices_2.append \ (recv_sparse_2.idx.get()[:len(expected_indices_seed_1[i])]) np.testing.assert_equal(sent_and_received_indices_2, @@ -313,9 +308,9 @@ def test_end_of_file(self): seed_rng = 0 max_num_events = 15 - data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(max_num_events,), - seed_sub_sampling=seed_rng) + data_loader = AedatStream(file_path="../dvs_recording.aedat4", + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng) recv_sparse = RecvSparse(shape=(max_num_events,)) data_loader.out_port.connect(recv_sparse.in_port) @@ -330,20 +325,22 @@ def test_end_of_file(self): 
data_loader.run(condition=run_cnd, run_cfg=run_cfg) # get data from the first 5 timesteps if i in range(5): - data_time_steps_1_to_5.append\ + data_time_steps_1_to_5.append \ (recv_sparse.data.get()) - indices_time_steps_1_to_5.append\ + indices_time_steps_1_to_5.append \ (recv_sparse.idx.get()) # get data from timesteps 28-32 - if i in range(27,32): - data_time_steps_28_to_32.append\ + if i in range(27, 32): + data_time_steps_28_to_32.append \ (recv_sparse.data.get()) - indices_time_steps_28_to_32.append\ + indices_time_steps_28_to_32.append \ (recv_sparse.idx.get()) - np.testing.assert_equal(data_time_steps_1_to_5, data_time_steps_28_to_32) - np.testing.assert_equal(indices_time_steps_1_to_5, indices_time_steps_28_to_32) + np.testing.assert_equal(data_time_steps_1_to_5, + data_time_steps_28_to_32) + np.testing.assert_equal(indices_time_steps_1_to_5, + indices_time_steps_28_to_32) # Stopping data_loader.stop() @@ -382,9 +379,9 @@ def test_index_encoding(self): dense_shape = (240, 180) max_num_events = 15 - data_loader = AedatDataLoader(file_path="../dvs_recording.aedat4", - shape_out=(max_num_events,), - seed_sub_sampling=seed_rng) + data_loader = AedatStream(file_path="../dvs_recording.aedat4", + shape_out=(max_num_events,), + seed_sub_sampling=seed_rng) recv_sparse = RecvSparse(shape=(max_num_events,)) data_loader.out_port.connect(recv_sparse.in_port) diff --git a/tests/lava/proc/event_data/io/test_dv_stream.py b/tests/lava/proc/event_data/io/test_dv_stream.py index 76c626500..728f90974 100644 --- a/tests/lava/proc/event_data/io/test_dv_stream.py +++ b/tests/lava/proc/event_data/io/test_dv_stream.py @@ -19,7 +19,7 @@ from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg -from lava.proc.event_data.event_data_loader.dv_stream import DvStream, DvStreamPM +from lava.proc.event_data.io.dv_stream import DvStream, DvStreamPM class TestProcessDvStream(unittest.TestCase): diff --git a/tests/lava/proc/event_data/test_integration.py b/tests/lava/proc/event_data/test_integration.py index e194c1d56..bad23dd77 100644 --- a/tests/lava/proc/event_data/test_integration.py +++ b/tests/lava/proc/event_data/test_integration.py @@ -2,35 +2,24 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import unittest - -from lava.proc.event_data.event_pre_processor.utils import DownSamplingMethodDense - -from lava.proc.event_data.event_data_loader.aedat_data_loader import AedatDataLoader -from lava.proc.event_data.event_pre_processor.sparse_to_sparse.binary_to_unary_polarity \ - import BinaryToUnaryPolarity -from lava.proc.event_data.event_pre_processor.sparse_to_dense.sparse_to_dense import \ - SparseToDense -from lava.proc.event_data.event_pre_processor.dense_to_dense.down_sampling_dense import \ - DownSamplingDense - import numpy as np +import unittest from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.ports.ports import InPort from lava.magma.core.process.variable import Var - from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.ports import PyInPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import 
PyLoihiProcessModel - from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg - -import matplotlib.pyplot as plt +from lava.proc.event_data.io.aedat_stream import AedatStream +from lava.proc.event_data.binary_to_unary_polarity.process \ + import BinaryToUnaryPolarity +from lava.proc.event_data.events_to_frame.process import EventsToFrame class RecvDense(AbstractProcess): @@ -57,7 +46,7 @@ def run_spk(self) -> None: class TestEventDataIntegration(unittest.TestCase): - def test_integration(self): + def test_integration_aedat_stream(self): x_history = [ [8, 12, 13, 13, 13, 9, 14, 14, 13, 13, 8, 9, 9, 13, 9], [8, 9, 12, 7, 12, 12, 20, 19, 10], @@ -85,44 +74,34 @@ def test_integration(self): seed_rng = 0 rng = np.random.default_rng(seed=seed_rng) - # AedatDataLoader parameters - adl_file_path = "dvs_recording.aedat4" - adl_max_num_events_out = 10 - adl_shape_out = (adl_max_num_events_out,) + # AedatStream parameters + as_file_path = "dvs_recording.aedat4" + as_max_num_events_out = 10 + as_shape_out = (as_max_num_events_out,) # BinaryToUnaryPolarity parameters - btup_shape = adl_shape_out - # SparseToDense parameters - std_shape_in = btup_shape - std_width_out = 240 - std_height_out = 180 - std_shape_out = (std_width_out, std_height_out) - # DownSamplingDense parameters - dss_shape_in = std_shape_out - dss_down_sampling_method = DownSamplingMethodDense.MAX_POOLING - dss_down_sampling_factor = 1 + btup_shape = as_shape_out + # EventsToFrame parameters + etf_shape_in = btup_shape + etf_width_out = 240 + etf_height_out = 180 + etf_num_channels = 1 + etf_shape_out = (etf_width_out, etf_height_out, etf_num_channels) # RecvDense parameters - rd_shape = (dss_shape_in[0] // dss_down_sampling_factor, - dss_shape_in[1] // dss_down_sampling_factor) + rd_shape = etf_shape_out # Instantiating Processes - aedat_data_loader = AedatDataLoader(file_path=adl_file_path, - shape_out=adl_shape_out, - seed_sub_sampling=seed_rng) + aedat_stream = AedatStream(file_path=as_file_path, + shape_out=as_shape_out, + seed_sub_sampling=seed_rng) binary_to_unary_polarity = BinaryToUnaryPolarity(shape=btup_shape) - sparse_to_dense = SparseToDense(shape_in=std_shape_in, - shape_out=std_shape_out) - down_sampling_dense = DownSamplingDense( - shape_in=dss_shape_in, - down_sampling_method=dss_down_sampling_method, - down_sampling_factor=dss_down_sampling_factor - ) + events_to_frame = EventsToFrame(shape_in=etf_shape_in, + shape_out=etf_shape_out) recv_dense = RecvDense(shape=rd_shape) # Connecting Processes - aedat_data_loader.out_port.connect(binary_to_unary_polarity.in_port) - binary_to_unary_polarity.out_port.connect(sparse_to_dense.in_port) - sparse_to_dense.out_port.connect(down_sampling_dense.in_port) - down_sampling_dense.out_port.connect(recv_dense.in_port) + aedat_stream.out_port.connect(binary_to_unary_polarity.in_port) + binary_to_unary_polarity.out_port.connect(events_to_frame.in_port) + events_to_frame.out_port.connect(recv_dense.in_port) # Run parameters num_steps = 9 @@ -131,7 +110,7 @@ def test_integration(self): # Running for i in range(num_steps): - aedat_data_loader.run(condition=run_cnd, run_cfg=run_cfg) + aedat_stream.run(condition=run_cnd, run_cfg=run_cfg) xs = np.array(x_history[i]) ys = np.array(y_history[i]) @@ -139,22 +118,22 @@ def test_integration(self): sent_and_received_data = \ recv_dense.data.get().astype(int) - if xs.shape[0] > adl_max_num_events_out: + if xs.shape[0] > as_max_num_events_out: data_idx_array = np.arange(0, xs.shape[0]) sampled_idx = 
rng.choice(data_idx_array, - adl_max_num_events_out, + as_max_num_events_out, replace=False) xs = xs[sampled_idx] ys = ys[sampled_idx] - expected_data = np.zeros(std_shape_out) + expected_data = np.zeros(etf_shape_out) expected_data[xs, ys] = 1 np.testing.assert_equal(sent_and_received_data, expected_data) # Stopping - aedat_data_loader.stop() + aedat_stream.stop() if __name__ == '__main__': diff --git a/tests/lava/proc/event_data/to_frame/test_to_frame.py b/tests/lava/proc/event_data/to_frame/test_to_frame.py deleted file mode 100644 index 270198ccd..000000000 --- a/tests/lava/proc/event_data/to_frame/test_to_frame.py +++ /dev/null @@ -1,275 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ - -import numpy as np -import typing as ty -import unittest - -from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.process.variable import Var -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.resources import CPU -from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.py.model import PyLoihiProcessModel -from lava.magma.core.run_conditions import RunSteps -from lava.magma.core.run_configs import Loihi1SimCfg -from lava.proc.event_data.to_frame.models import ToFrame, ToFramePM - -# TODO: add doc strings -class RecvDense(AbstractProcess): - def __init__(self, - shape: ty.Union[ty.Tuple[int, int], ty.Tuple[int, int, int]]) -> None: - super().__init__(shape=shape) - - self.in_port = InPort(shape=shape) - - self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) - - -@implements(proc=RecvDense, protocol=LoihiProtocol) -@requires(CPU) -class PyRecvDensePM(PyLoihiProcessModel): - in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) - - data: np.ndarray = LavaPyType(np.ndarray, int) - - def run_spk(self) -> None: - data = self.in_port.recv() - - self.data = data - - -class SendSparse(AbstractProcess): - def __init__(self, - shape: ty.Tuple[int], - data: np.ndarray, - indices: np.ndarray) -> None: - super().__init__(shape=shape, data=data, indices=indices) - - self.out_port = OutPort(shape=shape) - - -@implements(proc=SendSparse, protocol=LoihiProtocol) -@requires(CPU) -class PySendSparsePM(PyLoihiProcessModel): - out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) - - def __init__(self, proc_params): - super().__init__(proc_params) - self._data = proc_params["data"] - self._indices = proc_params["indices"] - - def run_spk(self) -> None: - data = self._data - idx = self._indices - - self.out_port.send(data, idx) - - -class TestProcessSparseToDense(unittest.TestCase): - def test_init_2d(self): - """Tests instantiation of SparseToDense for a 2D output.""" - to_frame = ToFrame(shape_in=(43200,), - shape_out=(240, 180)) - - self.assertIsInstance(to_frame, ToFrame) - self.assertEqual(to_frame.proc_params["shape_in"], (43200,)) - self.assertEqual(to_frame.proc_params["shape_out"], (240, 180)) - - def test_init_3d(self): - """Tests instantiation of SparseToDense for a 3D output.""" - to_frame = ToFrame(shape_in=(43200,), - shape_out=(240, 180, 2)) - - self.assertIsInstance(to_frame, ToFrame) - self.assertEqual(to_frame.proc_params["shape_in"], (43200,)) - self.assertEqual(to_frame.proc_params["shape_out"], (240, 180, 2)) - - def 
test_invalid_shape_out_throws_exception(self): - """Tests whether an exception is thrown when a 1d or 4d value for the shape_out argument is given.""" - # TODO: should the 4D+ case rather raise a NotImplementedError? - with(self.assertRaises(ValueError)): - ToFrame(shape_in=(43200,), - shape_out=(240,)) - - with(self.assertRaises(ValueError)): - ToFrame(shape_in=(43200,), - shape_out=(240, 180, 2, 1)) - - def test_invalid_shape_in_throws_exception(self): - """Tests whether a shape_in argument that isn't (n,) throws an exception.""" - with(self.assertRaises(ValueError)): - ToFrame(shape_in=(43200, 1), - shape_out=(240, 180)) - - def test_third_dimension_not_2_throws_exception(self): - """Tests whether an exception is thrown if the value of the 3rd dimension for the - shape_out argument is not 2.""" - with(self.assertRaises(ValueError)): - ToFrame(shape_in=(43200,), - shape_out=(240, 180, 1)) - - def test_negative_size_shape_in_throws_exception(self): - """Tests whether an exception is thrown when a negative integer for the shape_in - argument is given""" - with(self.assertRaises(ValueError)): - ToFrame(shape_in=(-43200,), - shape_out=(240, 180)) - - def test_negative_width_or_height_shape_out_throws_exception(self): - """Tests whether an exception is thrown when a negative width or height for the - shape_out argument is given""" - with(self.assertRaises(ValueError)): - ToFrame(shape_in=(43200,), - shape_out=(-240, 180)) - - with(self.assertRaises(ValueError)): - ToFrame(shape_in=(43200,), - shape_out=(240, -180)) - - -#TODO: add doc strings -class TestProcessModelSparseToDense(unittest.TestCase): - def test_init(self): - """Tests instantiation of the SparseToDense process model.""" - proc_params = { - "shape_out": (240, 180) - } - - pm = ToFramePM(proc_params) - - self.assertIsInstance(pm, ToFramePM) - self.assertEqual(pm._shape_out, proc_params["shape_out"]) - -# TODO: can be deleted I guess - def test_run(self): - data = np.array([1, 1, 1, 1, 1, 1]) - xs = [0, 1, 2, 1, 2, 4] - ys = [0, 2, 1, 5, 7, 4] - indices = np.ravel_multi_index((xs, ys), (8, 8)) - - send_sparse = SendSparse(shape=(10, ), data=data, indices=indices) - to_frame = ToFrame(shape_in=(10, ), - shape_out=(8, 8)) - - send_sparse.out_port.connect(to_frame.in_port) - - # Run parameters - num_steps = 1 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=num_steps) - - # Running - to_frame.run(condition=run_cnd, run_cfg=run_cfg) - - # Stopping - to_frame.stop() - - self.assertFalse(to_frame.runtime._is_running) - - def test_2d(self): - data = np.array([1, 1, 1, 1, 1, 1]) - xs = [0, 1, 2, 1, 2, 4] - ys = [0, 2, 1, 5, 7, 4] - indices = np.ravel_multi_index((xs, ys), (8, 8)) - - expected_data = np.zeros((8, 8)) - expected_data[0, 0] = 1 - expected_data[1, 2] = 1 - expected_data[2, 1] = 1 - - expected_data[1, 5] = 1 - expected_data[2, 7] = 1 - - expected_data[4, 4] = 1 - - send_sparse = SendSparse(shape=(10, ), data=data, indices=indices) - to_frame = ToFrame(shape_in=(10, ), - shape_out=(8, 8)) - recv_dense = RecvDense(shape=(8, 8)) - - send_sparse.out_port.connect(to_frame.in_port) - to_frame.out_port.connect(recv_dense.in_port) - - # Run parameters - num_steps = 1 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=num_steps) - - # Running - to_frame.run(condition=run_cnd, run_cfg=run_cfg) - - sent_and_received_data = \ - recv_dense.data.get() - - # Stopping - to_frame.stop() - - np.testing.assert_equal(sent_and_received_data, - expected_data) - - def test_3d(self): - data = np.array([1, 0, 1, 0, 1, 0]) - xs = [0, 1, 
2, 1, 2, 4] - ys = [0, 2, 1, 5, 7, 4] - indices = np.ravel_multi_index((xs, ys), (8, 8)) - - expected_data = np.zeros((8, 8, 2)) - expected_data[0, 0, 1] = 1 - expected_data[1, 2, 0] = 1 - expected_data[2, 1, 1] = 1 - - expected_data[1, 5, 0] = 1 - expected_data[2, 7, 1] = 1 - - expected_data[4, 4, 0] = 1 - - send_sparse = SendSparse(shape=(10,), data=data, indices=indices) - to_frame = ToFrame(shape_in=(10,), - shape_out=(8, 8, 2)) - recv_dense = RecvDense(shape=(8, 8, 2)) - - send_sparse.out_port.connect(to_frame.in_port) - to_frame.out_port.connect(recv_dense.in_port) - - # Run parameters - num_steps = 1 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=num_steps) - - # Running - to_frame.run(condition=run_cnd, run_cfg=run_cfg) - - sent_and_received_data = \ - recv_dense.data.get() - - # Stopping - to_frame.stop() - - # # TODO : REMOVE THIS AFTER DEBUG - # expected_data_im = np.zeros((8, 8)) - # expected_data_im[expected_data[:, :, 0] == 1] = -1 - # expected_data_im[expected_data[:, :, 1] == 1] = 1 - # actual_data_im = np.zeros((8, 8)) - # actual_data_im[sent_and_received_data[:, :, 0] == 1] = -1 - # actual_data_im[sent_and_received_data[:, :, 1] == 1] = 1 - # - # fig, (ax1, ax2) = plt.subplots(1, 2) - # fig.suptitle('3D') - # ax1.imshow(expected_data_im) - # ax1.set_title("Expected data") - # ax2.imshow(actual_data_im) - # ax2.set_title("Actual data") - # - # fig.show() - - np.testing.assert_equal(sent_and_received_data, - expected_data) - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/lava/proc/max_pooling/test_models.py b/tests/lava/proc/max_pooling/test_models.py index 253b85d3e..879d0861d 100644 --- a/tests/lava/proc/max_pooling/test_models.py +++ b/tests/lava/proc/max_pooling/test_models.py @@ -17,10 +17,17 @@ from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg from lava.proc.max_pooling.process import MaxPooling -from lava.proc.max_pooling.models import MaxPoolingPM +from lava.proc.max_pooling.models import PyMaxPoolingPM class RecvDense(AbstractProcess): + """Process that receives arbitrary dense data. + + Parameters + ---------- + shape: tuple + Shape of the InPort and Var. + """ def __init__(self, shape: tuple) -> None: super().__init__(shape=shape) @@ -33,6 +40,7 @@ def __init__(self, @implements(proc=RecvDense, protocol=LoihiProtocol) @requires(CPU) class PyRecvDensePM(PyLoihiProcessModel): + """Receives dense data from PyInPort and stores it in a Var.""" in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) data: np.ndarray = LavaPyType(np.ndarray, int) @@ -44,6 +52,13 @@ def run_spk(self) -> None: class SendDense(AbstractProcess): + """Process that sends arbitrary dense data. + + Parameters + ---------- + shape: tuple + Shape of the OutPort. 
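+    data: np.ndarray
+        Data to send through the OutPort.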
+ """ def __init__(self, shape: tuple, data: np.ndarray) -> None: @@ -55,6 +70,7 @@ def __init__(self, @implements(proc=SendDense, protocol=LoihiProtocol) @requires(CPU) class PySendDensePM(PyLoihiProcessModel): + """Sends dense data to PyOutPort.""" out_port: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) def __init__(self, proc_params): @@ -69,9 +85,9 @@ def run_spk(self) -> None: class TestProcessModelMaxPooling(unittest.TestCase): def test_init(self): - pm = MaxPoolingPM() + pm = PyMaxPoolingPM() - self.assertIsInstance(pm, MaxPoolingPM) + self.assertIsInstance(pm, PyMaxPoolingPM) def test_max_pooling(self): data = np.zeros((8, 8, 1)) @@ -296,5 +312,6 @@ def test_max_pooling_stride_different_than_kernel_size(self): np.testing.assert_equal(sent_and_received_data, expected_data) + if __name__ == '__main__': unittest.main() diff --git a/tests/lava/proc/max_pooling/test_process.py b/tests/lava/proc/max_pooling/test_process.py index 07bf34eff..ca1d9b3c1 100644 --- a/tests/lava/proc/max_pooling/test_process.py +++ b/tests/lava/proc/max_pooling/test_process.py @@ -61,3 +61,7 @@ def test_negative_padding_throws_exception(self): MaxPooling(shape_in=(240, 180, 1), kernel_size=2, padding=-1) + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file From dc02db1bccb1c9754461e59d3294f6624bc2baad Mon Sep 17 00:00:00 2001 From: Mathis Richter Date: Thu, 8 Dec 2022 10:23:26 +0100 Subject: [PATCH 18/32] Cleaned up DvStream and tests. Signed-off-by: Mathis Richter --- src/lava/proc/event_data/io/dv_stream.py | 82 +++++---- .../lava/proc/event_data/io/test_dv_stream.py | 157 +++++++++++------- 2 files changed, 133 insertions(+), 106 deletions(-) diff --git a/src/lava/proc/event_data/io/dv_stream.py b/src/lava/proc/event_data/io/dv_stream.py index 7efc89f08..2f8428cc6 100644 --- a/src/lava/proc/event_data/io/dv_stream.py +++ b/src/lava/proc/event_data/io/dv_stream.py @@ -1,4 +1,9 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + import typing as ty +from dv import NetworkNumpyEventPacketInput from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel @@ -8,68 +13,56 @@ from lava.magma.core.process.process import AbstractProcess from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from dv import NetworkNumpyEventPacketInput -class DvStream(AbstractProcess): - """ - Parameters - ---------- - """ +class DvStream(AbstractProcess): def __init__(self, *, address: str, port: int, - shape_out: ty.Tuple[int]) -> None: + shape_out: ty.Tuple[int], + **kwargs) -> None: super().__init__(address=address, port=port, - shape_out=shape_out) - self._validate_shape_out(shape_out) - self._validate_port(port) + shape_out=shape_out, + **kwargs) self._validate_address(address) - self.out_port = OutPort(shape=shape_out) + self._validate_port(port) + self._validate_shape(shape_out) + self.out_port = OutPort(shape=shape_out) @staticmethod - def _validate_shape_out(shape_out: ty.Tuple[int]) -> None: - """ - Checks whether the given shape is valid and that the size given - is not a negative number. Raises relevant exception if not - """ - if len(shape_out) != 1: - raise ValueError(f"Shape of the OutPort should be (n,). " - f"{shape_out} was given.") - if shape_out[0] <= 0: - raise ValueError(f"Max number of events should be positive. 
" - f"{shape_out} was given.") + def _validate_address(address: str) -> None: + """Check that address is not an empty string or None.""" + if not address: + raise ValueError("Address parameter not specified." + "The address must be an IP address or domain.") @staticmethod def _validate_port(port: int) -> None: - """ - Check whether the given port is valid. Raises relevant exception if not - """ - - if not (0 <= port <= 65535): - raise ValueError(f"Port should be between 0 and 65535" - f"{port} was given.") + """Check whether the given port number is valid.""" + _min = 0 + _max = 65535 + if not (_min <= port <= _max): + raise ValueError(f"Port number must be an integer between {_min=} " + f"and {_max=}; got {port=}.") @staticmethod - def _validate_address(address: str) -> None: - """ - Check that address is not an ampty string. Raises relevant exception if not - """ - - if not address: - raise ValueError("Address should not be empty") + def _validate_shape(shape: ty.Tuple[int]) -> None: + """Check that shape one-dimensional with a positive size.""" + if len(shape) != 1: + raise ValueError(f"Shape of the OutPort should be (n,); " + f"got {shape=}.") + if shape[0] <= 0: + raise ValueError(f"Size of the shape (maximum number of events) " + f"must be positive; got {shape=}.") @implements(proc=DvStream, protocol=LoihiProtocol) @requires(CPU) class DvStreamPM(PyLoihiProcessModel): - """ - Implementation of the DvStream process on Loihi, with sparse - representation of events. - """ + """Python ProcessModel of the DvStream Process""" out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) def __init__(self, proc_params: dict) -> None: @@ -77,8 +70,9 @@ def __init__(self, proc_params: dict) -> None: self._address = proc_params["address"] self._port = proc_params["port"] self._shape_out = proc_params["shape_out"] - self._event_stream = proc_params.get("_event_stream") + self._event_stream = proc_params.get("event_stream") if not self._event_stream: - self._event_stream = NetworkNumpyEventPacketInput(address=self._address, port=self._port) - - + self._event_stream = NetworkNumpyEventPacketInput( + address=self._address, + port=self._port + ) diff --git a/tests/lava/proc/event_data/io/test_dv_stream.py b/tests/lava/proc/event_data/io/test_dv_stream.py index 728f90974..dac6fe69d 100644 --- a/tests/lava/proc/event_data/io/test_dv_stream.py +++ b/tests/lava/proc/event_data/io/test_dv_stream.py @@ -2,18 +2,16 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -from dv import AedatFile -from dv.AedatFile import _AedatFileEventNumpyPacketIterator +import unittest import numpy as np import typing as ty -import unittest from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.ports.ports import InPort from lava.magma.core.process.variable import Var from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.ports import PyInPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel @@ -23,101 +21,136 @@ class TestProcessDvStream(unittest.TestCase): - def test_init(self): - """ - Tests instantiation of AedatDataLoader. 
- """ - + def test_init(self) -> None: + """Tests instantiation of AedatDataLoader.""" stream = DvStream(address="127.0.0.1", port=7777, - shape_out=(43200,)) + shape_out=(43200,), + additional_kwarg=5) self.assertIsInstance(stream, DvStream) - self.assertEqual((43200,), stream.out_port.shape) + self.assertEqual(stream.out_port.shape, (43200,)) + self.assertEqual(stream.proc_params["additional_kwarg"], 5) - def test_invalid_shape_throws_exception(self): - """ - Tests whether a shape_out argument with an invalid shape throws an exception. - """ + def test_invalid_shape_throws_exception(self) -> None: + """Tests whether a shape that is invalid (not one-dimensional) throws + an exception.""" + invalid_shape = (240, 180) with(self.assertRaises(ValueError)): DvStream(address="127.0.0.1", port=7777, - shape_out=(240, 180)) + shape_out=invalid_shape) - def test_negative_size_throws_exception(self): - """ - Tests whether a shape_out argument with a negative size throws an exception. - """ + def test_negative_size_throws_exception(self) -> None: + """Tests whether a shape with a negative size throws an exception.""" + invalid_shape = (-43200,) with(self.assertRaises(ValueError)): DvStream(address="127.0.0.1", port=7777, - shape_out=(-240,)) + shape_out=invalid_shape) - def test_negative_port_throws_exception(self): - """ - Tests whether a port argument with a negative size throws an exception. - """ + def test_negative_port_throws_exception(self) -> None: + """Tests whether a negative port throws an exception.""" + min_port = 0 + invalid_port = min_port - 1 with(self.assertRaises(ValueError)): DvStream(address="127.0.0.1", - port=-7777, + port=invalid_port, shape_out=(43200,)) - def test_port_out_of_range_throws_exception(self): - """ - Tests whether a port argument that is out of range throws an error. - """ + def test_port_out_of_range_throws_exception(self) -> None: + """Tests whether a positive port that is too large throws an + exception.""" + max_port = 65535 + invalid_port = max_port + 1 with(self.assertRaises(ValueError)): DvStream(address="127.0.0.1", - port=7777777, + port=invalid_port, shape_out=(43200,)) - def test_address_empty_string_throws_exception(self): + def test_address_empty_string_throws_exception(self) -> None: + """Tests whether an empty address throws an exception.""" + invalid_address = "" with(self.assertRaises(ValueError)): - DvStream(address="", + DvStream(address=invalid_address, port=7777, shape_out=(43200,)) -class TestProcessModelDvStream(unittest.TestCase): - def test_init(self): - """ - Tests instantiation of the DvStream process model. - """ - proc_params = { - "address": "127.0.0.1", - "port": 7777, - "shape_out": (43200,), - } - pm = DvStreamPM(proc_params) +class RecvSparse(AbstractProcess): + """Process that receives arbitrary sparse data. - self.assertIsInstance(pm, DvStreamPM) + Parameters + ---------- + shape: tuple + Shape of the InPort and Vars. + """ + def __init__(self, + shape: ty.Tuple[int]) -> None: + super().__init__(shape=shape) + + self.in_port = InPort(shape=shape) + + self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) + self.idx = Var(shape=shape, init=np.zeros(shape, dtype=int)) - def test_run_spike(self): - class PacketInput(ty.Protocol): - def __next__(self): - ... 
- class MockPacketInput: - def __next__(self): - return { - "x": 35, - "y": 35, - "polarity": 0, - } +@implements(proc=RecvSparse, protocol=LoihiProtocol) +@requires(CPU) +class PyRecvSparsePM(PyLoihiProcessModel): + """Receives sparse data from PyInPort and stores a padded version of + received data and indices in Vars.""" + in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) + data: np.ndarray = LavaPyType(np.ndarray, int) + idx: np.ndarray = LavaPyType(np.ndarray, int) - proc_params = { + def run_spk(self) -> None: + data, idx = self.in_port.recv() + + self.data = np.pad(data, + pad_width=(0, self.in_port.shape[0] - data.shape[0])) + self.idx = np.pad(idx, + pad_width=(0, self.in_port.shape[0] - data.shape[0])) + + +class MockPacketInput: + def __next__(self): + return { + "x": 35, + "y": 35, + "polarity": 0, + } + + +class TestProcessModelDvStream(unittest.TestCase): + def setUp(self) -> None: + self.proc_params = { "address": "127.0.0.1", "port": 7777, "shape_out": (43200,), - "_event_stream": MockPacketInput() - + "event_stream": MockPacketInput() } - pm = DvStreamPM(proc_params) - pm.run_spk() + def test_init(self) -> None: + """Tests instantiation of the DvStream PyProcModel.""" + pm = DvStreamPM(proc_params=self.proc_params) + self.assertIsInstance(pm, DvStreamPM) + + def test_run_spk(self) -> None: + max_num_events = 15 + shape = (max_num_events,) + + dv_stream = DvStream(address="127.0.0.1", + port=7777, + shape_out=shape, + event_stream=MockPacketInput()) + recv_sparse = RecvSparse(shape=shape) + dv_stream.out_port.connect(recv_sparse.in_port) + dv_stream.run(condition=RunSteps(num_steps=1), run_cfg=Loihi1SimCfg()) + dv_stream.stop() if __name__ == '__main__': From 1581cac8e3ec1f844065873f29475fd4de7d2e45 Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Thu, 8 Dec 2022 15:17:03 +0100 Subject: [PATCH 19/32] small changes to doc strings + modifications sub_sampling --- .../event_data/events_to_frame/process.py | 2 +- src/lava/proc/event_data/io/aedat_stream.py | 21 ++++++++++++------- src/lava/utils/events.py | 5 +---- .../proc/event_data/io/test_aedat_stream.py | 12 +++++------ 4 files changed, 22 insertions(+), 18 deletions(-) diff --git a/src/lava/proc/event_data/events_to_frame/process.py b/src/lava/proc/event_data/events_to_frame/process.py index b80da3f5d..b958dfc32 100644 --- a/src/lava/proc/event_data/events_to_frame/process.py +++ b/src/lava/proc/event_data/events_to_frame/process.py @@ -64,7 +64,7 @@ def _validate_shape_in(shape_in: ty.Tuple[int]) -> None: @staticmethod def _validate_shape_out(shape_out: ty.Tuple[int, int, int]) -> None: """Validate that a given shape is of the form (W, H, C) where W and H - are strictly positive and C either 1 or 2. + are strictly positive and C is equal to either 1 or 2. Parameters ---------- diff --git a/src/lava/proc/event_data/io/aedat_stream.py b/src/lava/proc/event_data/io/aedat_stream.py index 6f8c2b0da..a5ee58234 100644 --- a/src/lava/proc/event_data/io/aedat_stream.py +++ b/src/lava/proc/event_data/io/aedat_stream.py @@ -6,6 +6,7 @@ import numpy as np import os.path import typing as ty +import warnings from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import OutPort @@ -35,8 +36,7 @@ class AedatStream(AbstractProcess): shape_out : tuple (shape (n,)) The shape of the OutPort. The size of this parameter sets a maximum number of events per time-step, and the process will subsample data - in order to fit it into this port. 
Data which contains fewer events - will be padded with zeros. + in order to fit it into this port. seed_sub_sampling : int, optional Seed used for the random number generator that sub-samples data to @@ -110,18 +110,25 @@ def __init__(self, proc_params: dict) -> None: def run_spk(self) -> None: """ Compiles events into a batch (roughly 10ms long). The polarity data - and x and y values are then used to encode the sparse tensor. The - data is sub-sampled if necessary, and then sent out. + and x and y values are then used to encode the sparse tensor using + row-major (C-style) encoding. The data is sub-sampled if necessary, + and then sent out. """ events = self._get_next_event_batch() data, indices = self._encode_data_and_indices(events) - # If we have more data than our shape allows, subsample + # If we have more data than our shape allows, sub-sample if data.shape[0] > self._shape_out[0]: data, indices = sub_sample(data, indices, self._shape_out[0], self._random_rng) + # warn the user if we need to sub-sample + percentage_data_lost = (1 - self._shape_out[0] / data.shape[0]) * 100 + warnings.warn(f"Read {data.shape[0]} events. Maximum number of events is {self._shape_out[0]}. " + f"Removed {data.shape[0] - self._shape_out[0]} ({percentage_data_lost:.1f}%) " + f"events by subsampling.") + self.out_port.send(data, indices) def _get_next_event_batch(self): @@ -142,7 +149,7 @@ def _get_next_event_batch(self): def _init_aedat_file(self) -> None: """ - Resets the event stream + Resets the event stream. """ self._file = AedatFile(file_name=self._file_path) self._stream = self._file["events"].numpy() @@ -152,7 +159,7 @@ def _encode_data_and_indices(self, -> ty.Tuple[np.ndarray, np.ndarray]: """ Extracts the polarity data, and x and y indices from the given - batch of events, and encodes them accordingly. + batch of events, and encodes them using C-style encoding. """ xs, ys, ps = events['x'], events['y'], events['polarity'] data = ps diff --git a/src/lava/utils/events.py b/src/lava/utils/events.py index cd70e08fe..c0bee702b 100644 --- a/src/lava/utils/events.py +++ b/src/lava/utils/events.py @@ -4,7 +4,6 @@ import numpy as np import typing as ty -import warnings def sub_sample(data: np.ndarray, @@ -18,8 +17,6 @@ def sub_sample(data: np.ndarray, max_events, replace=False) - percentage_data_lost = (1 - max_events/data.shape[0])*100 - warnings.warn(f"Read {data.shape[0]} events. Maximum number of events is {max_events}. " - f"Removed {data.shape[0] - max_events} ({percentage_data_lost:.1f}%) events by subsampling.") + sampled_idx = np.sort(sampled_idx) return data[sampled_idx], indices[sampled_idx] diff --git a/tests/lava/proc/event_data/io/test_aedat_stream.py b/tests/lava/proc/event_data/io/test_aedat_stream.py index bf5a6a101..f37c31941 100644 --- a/tests/lava/proc/event_data/io/test_aedat_stream.py +++ b/tests/lava/proc/event_data/io/test_aedat_stream.py @@ -195,9 +195,9 @@ def test_sub_sampling(self): ] expected_indices = [ - [1787, 1780, 2498, 2633, 2486, 1597, 1727, 2496, 2500, 1729], + [1597., 2486., 2496., 2498., 1787., 2633., 1729., 1727., 2500., 1780.], [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], - [1362, 1384, 7138, 1379, 1390, 2982, 1364, 2301, 2289, 1386], + [7138., 2301., 2982., 1364., 1379., 1386., 1384., 1390., 2289., 1362.], [1910, 1382, 1909, 1562, 1606, 1381], [464] ] @@ -236,17 +236,17 @@ def test_sub_sampling_seed(self): TODO: would testing on only 1 timestep be sufficient? 
""" expected_indices_seed_0 = [ - [1787, 1780, 2498, 2633, 2486, 1597, 1727, 2496, 2500, 1729], + [1597., 2486., 2496., 2498., 1787., 2633., 1729., 1727., 2500., 1780.], [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], - [1362, 1384, 7138, 1379, 1390, 2982, 1364, 2301, 2289, 1386], + [7138., 2301., 2982., 1364., 1379., 1386., 1384., 1390., 2289., 1362.], [1910, 1382, 1909, 1562, 1606, 1381], [464] ] expected_indices_seed_1 = [ - [2498, 2486, 2488, 1597, 1727, 2496, 2308, 2642, 2489, 2500], + [1597., 2308., 2486., 2496., 2498., 2642., 2489., 2488., 1727., 2500.], [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], - [1401, 1390, 1386, 1364, 2289, 7138, 1601, 2301, 1379, 1384], + [7138., 2301., 1601., 1364., 1379., 1386., 1384., 1390., 2289., 1401.], [1910, 1382, 1909, 1562, 1606, 1381], [464] ] From 5fbac8bfa58f69718285ed526e07b0fc840448d6 Mon Sep 17 00:00:00 2001 From: Mathis Richter Date: Thu, 8 Dec 2022 15:32:19 +0100 Subject: [PATCH 20/32] Cleaned BinaryToUnary Process and unit tests. Signed-off-by: Mathis Richter --- .../binary_to_unary_polarity/process.py | 12 ++-- .../__init__.py | 0 .../binary_to_unary_polarity/test_models.py | 60 ++++++++++++++++ .../test_process.py | 17 ++--- .../test_models.py => utils.py} | 72 +++---------------- 5 files changed, 83 insertions(+), 78 deletions(-) rename tests/lava/proc/event_data/{binary_to_unary => binary_to_unary_polarity}/__init__.py (100%) create mode 100644 tests/lava/proc/event_data/binary_to_unary_polarity/test_models.py rename tests/lava/proc/event_data/{binary_to_unary => binary_to_unary_polarity}/test_process.py (55%) rename tests/lava/proc/event_data/{binary_to_unary/test_models.py => utils.py} (52%) diff --git a/src/lava/proc/event_data/binary_to_unary_polarity/process.py b/src/lava/proc/event_data/binary_to_unary_polarity/process.py index 0e1ffd1f5..568c2ff24 100644 --- a/src/lava/proc/event_data/binary_to_unary_polarity/process.py +++ b/src/lava/proc/event_data/binary_to_unary_polarity/process.py @@ -32,8 +32,8 @@ def __init__(self, @staticmethod def _validate_shape(shape: ty.Tuple[int]) -> None: - """Validate that a given shape is of the form (max_num_events, ) where - max_num_events is strictly positive. + """Validate that a given shape is of the form (max_num_events,) where + max_num_events is positive. Parameters ---------- @@ -42,11 +42,9 @@ def _validate_shape(shape: ty.Tuple[int]) -> None: """ if len(shape) != 1: raise ValueError(f"Expected shape to be of the form " - f"(max_num_events, ). " - f"Found {shape=}.") + f"(max_num_events,); got {shape=}.") if shape[0] <= 0: raise ValueError(f"Expected max number of events " - f"(first element of shape) to be strictly " - f"positive. 
" - f"Found {shape=}.") + f"(first element of shape) to be positive; " + f"got {shape=}.") diff --git a/tests/lava/proc/event_data/binary_to_unary/__init__.py b/tests/lava/proc/event_data/binary_to_unary_polarity/__init__.py similarity index 100% rename from tests/lava/proc/event_data/binary_to_unary/__init__.py rename to tests/lava/proc/event_data/binary_to_unary_polarity/__init__.py diff --git a/tests/lava/proc/event_data/binary_to_unary_polarity/test_models.py b/tests/lava/proc/event_data/binary_to_unary_polarity/test_models.py new file mode 100644 index 000000000..188346537 --- /dev/null +++ b/tests/lava/proc/event_data/binary_to_unary_polarity/test_models.py @@ -0,0 +1,60 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np +import unittest + +from lava.magma.core.run_conditions import RunSteps +from lava.magma.core.run_configs import Loihi1SimCfg +from lava.proc.event_data.binary_to_unary_polarity.process \ + import BinaryToUnaryPolarity +from lava.proc.event_data.binary_to_unary_polarity.models \ + import PyBinaryToUnaryPolarityPM + +from ..utils import SendSparse, RecvSparse + + +class TestProcessModelBinaryToUnaryPolarity(unittest.TestCase): + def test_init(self): + """Tests instantiation of the BinaryToUnary process model.""" + pm = PyBinaryToUnaryPolarityPM() + self.assertIsInstance(pm, PyBinaryToUnaryPolarityPM) + + def test_binary_to_unary_polarity_encoding(self): + """Tests whether the encoding from binary to unary works correctly.""" + data = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0]) + indices = np.array([1, 5, 4, 3, 3, 2, 0, 1, 0]) + + expected_data = data + expected_data[expected_data == 0] = 1 + + expected_indices = indices + + send_sparse = SendSparse(shape=(10, ), data=data, indices=indices) + binary_to_unary_encoder = BinaryToUnaryPolarity(shape=(10,)) + recv_sparse = RecvSparse(shape=(10, )) + + send_sparse.out_port.connect(binary_to_unary_encoder.in_port) + binary_to_unary_encoder.out_port.connect(recv_sparse.in_port) + + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + send_sparse.run(condition=run_cnd, run_cfg=run_cfg) + + sent_and_received_data = \ + recv_sparse.data.get()[:expected_data.shape[0]] + sent_and_received_indices = \ + recv_sparse.idx.get()[:expected_indices.shape[0]] + + send_sparse.stop() + + np.testing.assert_equal(sent_and_received_data, + expected_data) + np.testing.assert_equal(sent_and_received_indices, + expected_indices) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lava/proc/event_data/binary_to_unary/test_process.py b/tests/lava/proc/event_data/binary_to_unary_polarity/test_process.py similarity index 55% rename from tests/lava/proc/event_data/binary_to_unary/test_process.py rename to tests/lava/proc/event_data/binary_to_unary_polarity/test_process.py index 12d05f995..5787b6e91 100644 --- a/tests/lava/proc/event_data/binary_to_unary/test_process.py +++ b/tests/lava/proc/event_data/binary_to_unary_polarity/test_process.py @@ -11,21 +11,22 @@ class TestProcessBinaryToUnaryPolarity(unittest.TestCase): def test_init(self): """Tests instantiation of BinaryToUnaryPolarity.""" - converter = BinaryToUnaryPolarity(shape=(43200,)) + binary_to_unary_polarity = BinaryToUnaryPolarity(shape=(43200,)) - self.assertIsInstance(converter, BinaryToUnaryPolarity) + self.assertIsInstance(binary_to_unary_polarity, BinaryToUnaryPolarity) def test_invalid_shape_throws_exception(self): - """Tests whether a shape argument with an invalid shape - 
throws an exception.""" + """Tests whether an invalid shape (not one-dimensional) throws an + exception.""" + invalid_shape = (240, 180) with(self.assertRaises(ValueError)): - BinaryToUnaryPolarity(shape=(240, 180)) + BinaryToUnaryPolarity(shape=invalid_shape) def test_negative_size_throws_exception(self): - """Tests whether a shape argument with a negative size - throws an exception.""" + """Tests whether shape with a negative size throws an exception.""" + invalid_shape = (-43200,) with(self.assertRaises(ValueError)): - BinaryToUnaryPolarity(shape=(-43200,)) + BinaryToUnaryPolarity(shape=invalid_shape) if __name__ == '__main__': diff --git a/tests/lava/proc/event_data/binary_to_unary/test_models.py b/tests/lava/proc/event_data/utils.py similarity index 52% rename from tests/lava/proc/event_data/binary_to_unary/test_models.py rename to tests/lava/proc/event_data/utils.py index b25141c2b..b0ddd8255 100644 --- a/tests/lava/proc/event_data/binary_to_unary/test_models.py +++ b/tests/lava/proc/event_data/utils.py @@ -4,7 +4,6 @@ import numpy as np import typing as ty -import unittest from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import InPort, OutPort @@ -15,12 +14,6 @@ from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel -from lava.magma.core.run_conditions import RunSteps -from lava.magma.core.run_configs import Loihi1SimCfg -from lava.proc.event_data.binary_to_unary_polarity.process \ - import BinaryToUnaryPolarity -from lava.proc.event_data.binary_to_unary_polarity.models \ - import PyBinaryToUnaryPolarityPM class RecvSparse(AbstractProcess): @@ -54,12 +47,14 @@ class PyRecvSparsePM(PyLoihiProcessModel): def run_spk(self) -> None: data, idx = self.in_port.recv() - self.data = np.pad(data, - pad_width=( - 0, self.in_port.shape[0] - data.shape[0])) - self.idx = np.pad(idx, - pad_width=( - 0, self.in_port.shape[0] - data.shape[0])) + self.data = np.pad( + data, + pad_width=(0, self.in_port.shape[0] - data.shape[0]) + ) + self.idx = np.pad( + idx, + pad_width=(0, self.in_port.shape[0] - data.shape[0]) + ) class SendSparse(AbstractProcess): @@ -91,53 +86,4 @@ def __init__(self, proc_params): self._indices = proc_params["indices"] def run_spk(self) -> None: - data = self._data - idx = self._indices - - self.out_port.send(data, idx) - - -class TestProcessModelBinaryToUnaryPolarity(unittest.TestCase): - def test_init(self): - """Tests instantiation of the BinaryToUnary process model.""" - pm = PyBinaryToUnaryPolarityPM() - - self.assertIsInstance(pm, PyBinaryToUnaryPolarityPM) - - def test_binary_to_unary_polarity_encoding(self): - """Tests whether the encoding from binary to unary works correctly.""" - data = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0]) - indices = np.array([1, 5, 4, 3, 3, 2, 0, 1, 0]) - - expected_data = data - expected_data[expected_data == 0] = 1 - - expected_indices = indices - - send_sparse = SendSparse(shape=(10, ), data=data, indices=indices) - binary_to_unary_encoder = BinaryToUnaryPolarity(shape=(10,)) - recv_sparse = RecvSparse(shape=(10, )) - - send_sparse.out_port.connect(binary_to_unary_encoder.in_port) - binary_to_unary_encoder.out_port.connect(recv_sparse.in_port) - - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=1) - - send_sparse.run(condition=run_cnd, run_cfg=run_cfg) - - sent_and_received_data = \ - recv_sparse.data.get()[:expected_data.shape[0]] - sent_and_received_indices = \ - 
recv_sparse.idx.get()[:expected_indices.shape[0]] - - send_sparse.stop() - - np.testing.assert_equal(sent_and_received_data, - expected_data) - np.testing.assert_equal(sent_and_received_indices, - expected_indices) - - -if __name__ == '__main__': - unittest.main() + self.out_port.send(data=self._data, indices=self._indices) From 082c9c9f316ca099c2cfcca766bc128ed4e54d07 Mon Sep 17 00:00:00 2001 From: Mathis Richter Date: Thu, 8 Dec 2022 15:45:48 +0100 Subject: [PATCH 21/32] Simplified the ProcModel of the BinaryToUnary Process. Signed-off-by: Mathis Richter --- .../binary_to_unary_polarity/models.py | 29 ++----------------- 1 file changed, 3 insertions(+), 26 deletions(-) diff --git a/src/lava/proc/event_data/binary_to_unary_polarity/models.py b/src/lava/proc/event_data/binary_to_unary_polarity/models.py index 2632c6b9a..46a7a3a99 100644 --- a/src/lava/proc/event_data/binary_to_unary_polarity/models.py +++ b/src/lava/proc/event_data/binary_to_unary_polarity/models.py @@ -2,8 +2,6 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -import numpy as np - from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.magma.core.model.py.ports import PyInPort, PyOutPort from lava.magma.core.model.py.type import LavaPyType @@ -20,33 +18,12 @@ class PyBinaryToUnaryPolarityPM(PyLoihiProcessModel): """PyLoihiProcessModel implementing the BinaryToUnaryPolarity Process. Transforms event-based data with binary polarity (0 for negative events, - 1 for positive events) coming from its in_port to unary polarity - (1 for negative and positive events) and sends it through its out_port. - """ + 1 for positive events) to unary polarity (1 for negative and positive + events).""" in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) def run_spk(self) -> None: data, indices = self.in_port.recv() - - data = self._encode(data) - - self.out_port.send(data, indices) - - @staticmethod - def _encode(data: np.ndarray) -> np.ndarray: - """Transform event-based data with binary polarity to unary polarity. - - Parameters - ---------- - data : ndarray - Event-based data with binary polarity. - - Returns - ---------- - result : ndarray - Event-based data with unary polarity. - """ data[data == 0] = 1 - - return data + self.out_port.send(data, indices) From 7c4f1b418bdbd5fbbd4071b7bc2c680f2775934c Mon Sep 17 00:00:00 2001 From: Mathis Richter Date: Thu, 8 Dec 2022 15:48:42 +0100 Subject: [PATCH 22/32] Fixed docstring. Signed-off-by: Mathis Richter --- .../proc/event_data/binary_to_unary_polarity/test_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/lava/proc/event_data/binary_to_unary_polarity/test_models.py b/tests/lava/proc/event_data/binary_to_unary_polarity/test_models.py index 188346537..90599e6e0 100644 --- a/tests/lava/proc/event_data/binary_to_unary_polarity/test_models.py +++ b/tests/lava/proc/event_data/binary_to_unary_polarity/test_models.py @@ -17,7 +17,7 @@ class TestProcessModelBinaryToUnaryPolarity(unittest.TestCase): def test_init(self): - """Tests instantiation of the BinaryToUnary process model.""" + """Tests instantiation of the BinaryToUnary ProcessModel.""" pm = PyBinaryToUnaryPolarityPM() self.assertIsInstance(pm, PyBinaryToUnaryPolarityPM) From 099259f1c413fc1b13d2fff124640c37b9c85d68 Mon Sep 17 00:00:00 2001 From: Mathis Richter Date: Thu, 8 Dec 2022 16:21:18 +0100 Subject: [PATCH 23/32] Reviewed EventsToFrame Process and unit tests. 
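
The EventsToFrame Process turns sparse, event-based input into a dense
frame with either one channel (unary polarity) or two channels (binary
polarity, with negative events collected in channel 0 and positive events
in channel 1). A short construction sketch, using the shapes from the unit
tests below and assuming the package layout introduced in this series:

    from lava.proc.event_data.events_to_frame.process import EventsToFrame

    # (W, H, 1): input events are assumed to carry unary polarity.
    to_frame_unary = EventsToFrame(shape_in=(43200,),
                                   shape_out=(240, 180, 1))

    # (W, H, 2): input events are assumed to carry binary polarity;
    # channel 0 collects negative events, channel 1 positive events.
    to_frame_binary = EventsToFrame(shape_in=(43200,),
                                    shape_out=(240, 180, 2))

An upstream sparse event source connects to the Process's in_port, and
dense frames are read from its out_port.
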
Signed-off-by: Mathis Richter --- .../event_data/events_to_frame/process.py | 38 +++++++------ .../events_to_frame/test_process.py | 56 +++++++++++-------- 2 files changed, 54 insertions(+), 40 deletions(-) diff --git a/src/lava/proc/event_data/events_to_frame/process.py b/src/lava/proc/event_data/events_to_frame/process.py index b958dfc32..7c370c4d1 100644 --- a/src/lava/proc/event_data/events_to_frame/process.py +++ b/src/lava/proc/event_data/events_to_frame/process.py @@ -13,10 +13,18 @@ class EventsToFrame(AbstractProcess): frame. Output shape can be either (W, H, 1) or (W, H, 2). - (1) If output shape is (W, H, 1), input is assumed to be unary. - (2) If output shape is (W, H, 2), input is assumed to be binary. - Negative events are represented by 1s in first channel. - Positive events are represented by 1s in second channel. + + (1) If output shape is (W, H, 1), the event-based input data is assumed + to use unary polarity (all events are encoded with polarity 1). In this + case, the output frame has 1 at all coordinates that registered an event + and zero everywhere else. + + (2) If output shape is (W, H, 2), the event-based input data is assumed + to use binary polarity (negative events have polarity values of 0, positive + events have polarity values of 1). In this case, the output frame encodes + negative events by values of 1 in the first channel and positive events + by values of 1 in the second channel. All other coordinates are set to + zero. Parameters ---------- @@ -42,7 +50,7 @@ def __init__(self, @staticmethod def _validate_shape_in(shape_in: ty.Tuple[int]) -> None: - """Validate that a given shape is of the form (max_num_events, ) where + """Validate that a given shape is of the form (max_num_events,), where max_num_events is strictly positive. Parameters @@ -52,14 +60,13 @@ def _validate_shape_in(shape_in: ty.Tuple[int]) -> None: """ if len(shape_in) != 1: raise ValueError(f"Expected shape_in to be of the form " - f"(max_num_events, ). " - f"Found {shape_in=}.") + f"(max_num_events,); " + f"got {shape_in=}.") if shape_in[0] <= 0: raise ValueError(f"Expected max number of events " - f"(first element of shape_in) to be strictly " - f"positive. " - f"Found {shape_in=}.") + f"(first element of shape_in) to be positive; " + f"got {shape_in=}.") @staticmethod def _validate_shape_out(shape_out: ty.Tuple[int, int, int]) -> None: @@ -72,17 +79,16 @@ def _validate_shape_out(shape_out: ty.Tuple[int, int, int]) -> None: Shape to validate. """ if not len(shape_out) == 3: - raise ValueError(f"Expected shape_out to be 3D. " - f"Found {shape_out=}.") + raise ValueError(f"Expected shape_out to be 3D; " + f"got {shape_out=}.") if not (shape_out[2] == 1 or shape_out[2] == 2): raise ValueError(f"Expected number of channels " f"(third element of shape_out) to be either " - f"1 or 2. " - f"Found {shape_out=}.") + f"1 or 2; " + f"got {shape_out=}.") if shape_out[0] <= 0 or shape_out[1] <= 0: raise ValueError(f"Expected width and height " f"(first and second elements of shape_out) to be " - f"strictly positive. 
" - f"Found {shape_out=}.") + f"positive; got {shape_out=}.") diff --git a/tests/lava/proc/event_data/events_to_frame/test_process.py b/tests/lava/proc/event_data/events_to_frame/test_process.py index 0c5c97f83..c8b595177 100644 --- a/tests/lava/proc/event_data/events_to_frame/test_process.py +++ b/tests/lava/proc/event_data/events_to_frame/test_process.py @@ -8,52 +8,60 @@ class TestProcessEventsToFrame(unittest.TestCase): - def test_init(self): + def test_init(self) -> None: """Tests instantiation of SparseToDense for a 3D output.""" to_frame = EventsToFrame(shape_in=(43200,), shape_out=(240, 180, 1)) self.assertIsInstance(to_frame, EventsToFrame) - def test_invalid_shape_in_throws_exception(self): - """Tests whether a shape_in argument that isn't (n,) throws - an exception.""" + def test_invalid_shape_in_throws_exception(self) -> None: + """Tests whether an exception is thrown if shape_in is not of the + form (n,).""" + invalid_shape_in = (43200, 1) with(self.assertRaises(ValueError)): - EventsToFrame(shape_in=(43200, 1), + EventsToFrame(shape_in=invalid_shape_in, shape_out=(240, 180, 1)) - def test_invalid_shape_out_throws_exception(self): - """Tests whether an exception is thrown when a 1d or 4d value - for the shape_out argument is given.""" + def test_negative_size_shape_in_throws_exception(self) -> None: + """Tests whether an exception is thrown when a negative integer for + the shape_in argument is given.""" + invalid_shape_in = (-43200,) with(self.assertRaises(ValueError)): - EventsToFrame(shape_in=(43200,), - shape_out=(240,)) + EventsToFrame(shape_in=invalid_shape_in, + shape_out=(240, 180)) + def test_invalid_shape_out_dimensionality_throws_exception(self) -> None: + """Tests whether an exception is thrown when the dimensionality of the + shape_out parameter is not 3.""" + invalid_shape_out = (240, 180) with(self.assertRaises(ValueError)): EventsToFrame(shape_in=(43200,), - shape_out=(240, 180, 3)) + shape_out=invalid_shape_out) + def test_invalid_number_of_channels_throws_exception(self) -> None: + """Tests whether an exception is thrown when the number of channels is + larger than 2.""" + invalid_channel_size = 3 with(self.assertRaises(ValueError)): EventsToFrame(shape_in=(43200,), - shape_out=(240, 180, 2, 1)) - - def test_negative_size_shape_in_throws_exception(self): - """Tests whether an exception is thrown when a negative integer for - the shape_in argument is given""" - with(self.assertRaises(ValueError)): - EventsToFrame(shape_in=(-43200,), - shape_out=(240, 180)) + shape_out=(240, 180, invalid_channel_size)) - def test_negative_width_or_height_shape_out_throws_exception(self): - """Tests whether an exception is thrown when a negative width or height - for the shape_out argument is given""" + def test_negative_width_in_shape_out_throws_exception(self) -> None: + """Tests whether an exception is thrown when a negative width is + specified for the shape_out parameter.""" + invalid_width = -240 with(self.assertRaises(ValueError)): EventsToFrame(shape_in=(43200,), - shape_out=(-240, 180)) + shape_out=(invalid_width, 180)) + def test_negative_height_in_shape_out_throws_exception(self) -> None: + """Tests whether an exception is thrown when a negative height is + specified for the shape_out parameter.""" + invalid_height = -180 with(self.assertRaises(ValueError)): EventsToFrame(shape_in=(43200,), - shape_out=(240, -180)) + shape_out=(240, invalid_height)) if __name__ == '__main__': From 3f5ee2d7b0dd1e9a7603cb431751b59966bbbd68 Mon Sep 17 00:00:00 2001 From: Mathis Richter 
Date: Thu, 8 Dec 2022 16:36:07 +0100 Subject: [PATCH 24/32] Reviewed EventsToFrame ProcessModel and unit tests. Signed-off-by: Mathis Richter --- .../proc/event_data/events_to_frame/models.py | 11 +- .../event_data/events_to_frame/test_models.py | 132 +++--------------- tests/lava/proc/event_data/utils.py | 32 +++++ 3 files changed, 55 insertions(+), 120 deletions(-) diff --git a/src/lava/proc/event_data/events_to_frame/models.py b/src/lava/proc/event_data/events_to_frame/models.py index bb6b1ecc1..ba7e1a522 100644 --- a/src/lava/proc/event_data/events_to_frame/models.py +++ b/src/lava/proc/event_data/events_to_frame/models.py @@ -26,19 +26,12 @@ class PyEventsToFramePM(PyLoihiProcessModel): def run_spk(self) -> None: data, indices = self.in_port.recv() - dense_data = self._transform(data, indices) - self.out_port.send(dense_data) def _transform(self, data: np.ndarray, indices: np.ndarray) -> np.ndarray: - """Transform collection of sparsely represented events into a densely - represented frame of events. - - (1) If output shape is (W, H, 1), input is assumed to be unary. - (2) If output shape is (W, H, 2), input is assumed to be binary. - Negative events are represented by 1s in first channel. - Positive events are represented by 1s in second channel. + """Transforms from an event-based representation to a frame-based + representation. Parameters ---------- diff --git a/tests/lava/proc/event_data/events_to_frame/test_models.py b/tests/lava/proc/event_data/events_to_frame/test_models.py index 33950d239..a43b6b453 100644 --- a/tests/lava/proc/event_data/events_to_frame/test_models.py +++ b/tests/lava/proc/event_data/events_to_frame/test_models.py @@ -3,113 +3,34 @@ # See: https://spdx.org/licenses/ import numpy as np -import typing as ty import unittest -from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.process.variable import Var -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort, PyOutPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.resources import CPU -from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg from lava.proc.event_data.events_to_frame.process import EventsToFrame from lava.proc.event_data.events_to_frame.models import PyEventsToFramePM +from ..utils import RecvDense, SendSparse -class RecvDense(AbstractProcess): - """Process that receives arbitrary dense data. - Parameters - ---------- - shape: tuple - Shape of the InPort and Var. - """ - def __init__(self, - shape: ty.Union[ - ty.Tuple[int, int], ty.Tuple[int, int, int]]) -> None: - super().__init__(shape=shape) - - self.in_port = InPort(shape=shape) - - self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) - - -@implements(proc=RecvDense, protocol=LoihiProtocol) -@requires(CPU) -class PyRecvDensePM(PyLoihiProcessModel): - """Receives dense data from PyInPort and stores it in a Var.""" - in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) - - data: np.ndarray = LavaPyType(np.ndarray, int) - - def run_spk(self) -> None: - data = self.in_port.recv() - - self.data = data - - -class SendSparse(AbstractProcess): - """Process that sends arbitrary sparse data. - - Parameters - ---------- - shape: tuple - Shape of the OutPort. 
- """ - def __init__(self, - shape: ty.Tuple[int], - data: np.ndarray, - indices: np.ndarray) -> None: - super().__init__(shape=shape, data=data, indices=indices) - - self.out_port = OutPort(shape=shape) - - -@implements(proc=SendSparse, protocol=LoihiProtocol) -@requires(CPU) -class PySendSparsePM(PyLoihiProcessModel): - """Sends sparse data to PyOutPort.""" - out_port: PyOutPort = LavaPyType(PyOutPort.VEC_SPARSE, int) - - def __init__(self, proc_params): - super().__init__(proc_params) - self._data = proc_params["data"] - self._indices = proc_params["indices"] - - def run_spk(self) -> None: - data = self._data - idx = self._indices - - self.out_port.send(data, idx) - - -class TestProcessModelEventsEventsToFrame(unittest.TestCase): +class TestProcessModelEventsToFrame(unittest.TestCase): def test_init(self): - """Tests instantiation of the SparseToDense process model.""" + """Tests instantiation of the EventsToFrame ProcessModel.""" pm = PyEventsToFramePM() - self.assertIsInstance(pm, PyEventsToFramePM) - def test_third_dimension_1(self): + def test_convert_unary_polarity_events_to_frame(self) -> None: + """Tests whether the EventsToFrame ProcessModel correctly converts + event-based data with unary polarity to a frame-based + representation.""" data = np.array([1, 1, 1, 1, 1, 1]) xs = [0, 1, 2, 1, 2, 4] ys = [0, 2, 1, 5, 7, 7] indices = np.ravel_multi_index((xs, ys), (8, 8)) expected_data = np.zeros((8, 8, 1)) - expected_data[0, 0, 0] = 1 - expected_data[1, 2, 0] = 1 - expected_data[2, 1, 0] = 1 - - expected_data[1, 5, 0] = 1 - expected_data[2, 7, 0] = 1 - - expected_data[4, 7, 0] = 1 + for x, y, p in zip(xs, ys, data): + expected_data[x, y, 0] = p send_sparse = SendSparse(shape=(10,), data=data, indices=indices) to_frame = EventsToFrame(shape_in=(10,), @@ -119,35 +40,28 @@ def test_third_dimension_1(self): send_sparse.out_port.connect(to_frame.in_port) to_frame.out_port.connect(recv_dense.in_port) - num_steps = 1 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=num_steps) - - to_frame.run(condition=run_cnd, run_cfg=run_cfg) + to_frame.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) - sent_and_received_data = \ - recv_dense.data.get() + sent_and_received_data = recv_dense.data.get() to_frame.stop() np.testing.assert_equal(sent_and_received_data, expected_data) - def test_third_dimension_2(self): + def test_convert_binary_polarity_events_to_frame(self) -> None: + """Tests whether the EventsToFrame ProcessModel correctly converts + event-based data with binary polarity to a frame-based + representation.""" data = np.array([1, 0, 1, 0, 1, 0]) xs = [0, 1, 2, 1, 2, 4] ys = [0, 2, 1, 5, 7, 7] indices = np.ravel_multi_index((xs, ys), (8, 8)) expected_data = np.zeros((8, 8, 2)) - expected_data[0, 0, 1] = 1 - expected_data[1, 2, 0] = 1 - expected_data[2, 1, 1] = 1 - - expected_data[1, 5, 0] = 1 - expected_data[2, 7, 1] = 1 - - expected_data[4, 7, 0] = 1 + for x, y, p in zip(xs, ys, data): + expected_data[x, y, p] = 1 send_sparse = SendSparse(shape=(10,), data=data, indices=indices) to_frame = EventsToFrame(shape_in=(10,), @@ -157,14 +71,10 @@ def test_third_dimension_2(self): send_sparse.out_port.connect(to_frame.in_port) to_frame.out_port.connect(recv_dense.in_port) - num_steps = 1 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=num_steps) - - to_frame.run(condition=run_cnd, run_cfg=run_cfg) + to_frame.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) - sent_and_received_data = \ - recv_dense.data.get() + sent_and_received_data = 
recv_dense.data.get() to_frame.stop() diff --git a/tests/lava/proc/event_data/utils.py b/tests/lava/proc/event_data/utils.py index b0ddd8255..73be65207 100644 --- a/tests/lava/proc/event_data/utils.py +++ b/tests/lava/proc/event_data/utils.py @@ -57,6 +57,38 @@ def run_spk(self) -> None: ) +class RecvDense(AbstractProcess): + """Process that receives arbitrary dense data. + + Parameters + ---------- + shape: tuple + Shape of the InPort and Var. + """ + def __init__(self, + shape: ty.Union[ + ty.Tuple[int, int], ty.Tuple[int, int, int]]) -> None: + super().__init__(shape=shape) + + self.in_port = InPort(shape=shape) + + self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) + + +@implements(proc=RecvDense, protocol=LoihiProtocol) +@requires(CPU) +class PyRecvDensePM(PyLoihiProcessModel): + """Receives dense data from PyInPort and stores it in a Var.""" + in_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + + data: np.ndarray = LavaPyType(np.ndarray, int) + + def run_spk(self) -> None: + data = self.in_port.recv() + + self.data = data + + class SendSparse(AbstractProcess): """Process that sends arbitrary sparse data. From 283b2968b29f91d929ff1eae495ae0f791350f0c Mon Sep 17 00:00:00 2001 From: SveaMeyer13 Date: Thu, 8 Dec 2022 18:17:37 +0100 Subject: [PATCH 25/32] dv_stream without subsampling --- src/lava/proc/event_data/io/dv_stream.py | 64 +++++++++++++ .../lava/proc/event_data/io/test_dv_stream.py | 91 +++++++++++++++---- 2 files changed, 139 insertions(+), 16 deletions(-) diff --git a/src/lava/proc/event_data/io/dv_stream.py b/src/lava/proc/event_data/io/dv_stream.py index 2f8428cc6..7ed5e08fb 100644 --- a/src/lava/proc/event_data/io/dv_stream.py +++ b/src/lava/proc/event_data/io/dv_stream.py @@ -3,6 +3,7 @@ # See: https://spdx.org/licenses/ import typing as ty +import warnings from dv import NetworkNumpyEventPacketInput from lava.magma.core.decorator import implements, requires @@ -13,6 +14,9 @@ from lava.magma.core.process.process import AbstractProcess from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +import numpy as np + +from lava.utils.events import sub_sample class DvStream(AbstractProcess): @@ -20,15 +24,18 @@ def __init__(self, *, address: str, port: int, + shape_frame_in: ty.Tuple[int, int], shape_out: ty.Tuple[int], **kwargs) -> None: super().__init__(address=address, port=port, shape_out=shape_out, + shape_frame_in=shape_frame_in, **kwargs) self._validate_address(address) self._validate_port(port) self._validate_shape(shape_out) + self._validate_frame_size(shape_frame_in) self.out_port = OutPort(shape=shape_out) @@ -58,6 +65,16 @@ def _validate_shape(shape: ty.Tuple[int]) -> None: raise ValueError(f"Size of the shape (maximum number of events) " f"must be positive; got {shape=}.") + @staticmethod + def _validate_frame_size(shape: ty.Tuple[int, int]) -> None: + """Check that shape one-dimensional with a positive size.""" + if len(shape) != 2: + raise ValueError(f"Shape of the frame should be (n,); " + f"got {shape=}.") + if shape[0] <= 0 or shape[1] <= 0: + raise ValueError(f"Size of the frame " + f"must be positive; got {shape=}.") + @implements(proc=DvStream, protocol=LoihiProtocol) @requires(CPU) @@ -70,9 +87,56 @@ def __init__(self, proc_params: dict) -> None: self._address = proc_params["address"] self._port = proc_params["port"] self._shape_out = proc_params["shape_out"] + self._frame_shape = proc_params["shape_frame_in"] self._event_stream = proc_params.get("event_stream") if not 
self._event_stream: self._event_stream = NetworkNumpyEventPacketInput( address=self._address, port=self._port ) + + def run_spk(self) -> None: + """ + Compiles events into a batch (roughly 10ms long). The polarity data + and x and y values are then used to encode the sparse tensor. The + data is sub-sampled if necessary, and then sent out. + """ + events = self._get_next_event_batch() + if not events: + data = np.empty(self._shape_out) + indices = np.empty(self._shape_out) + warnings.warn("no events received") + else: + data, indices = self._encode_data_and_indices(events) + # If we have more data than our shape allows, subsample + # if data.shape[0] > self._shape_out[0]: + # data, indices = sub_sample(data, indices, + # self._shape_out[0], self._random_rng) + self.out_port.send(data, indices) + + def _get_next_event_batch(self): + """ + Compiles events from the event stream into batches which will be + treated in a single timestep. Once we reach the end of the file, the + process loops back to the start of the file. + """ + try: + # If end of file, raises StopIteration error. + events = self._event_stream.__next__() + # TODO add exact error that is thrown + except: + return None + return events + + def _encode_data_and_indices(self, + events: np.ndarray) \ + -> ty.Tuple[np.ndarray, np.ndarray]: + """ + Extracts the polarity data, and x and y indices from the given + batch of events, and encodes them accordingly. + """ + xs, ys, ps = events['x'], events['y'], events['polarity'] + data = ps + indices = np.ravel_multi_index((xs, ys), self._frame_shape) + + return data, indices diff --git a/tests/lava/proc/event_data/io/test_dv_stream.py b/tests/lava/proc/event_data/io/test_dv_stream.py index dac6fe69d..a95fa465c 100644 --- a/tests/lava/proc/event_data/io/test_dv_stream.py +++ b/tests/lava/proc/event_data/io/test_dv_stream.py @@ -22,9 +22,10 @@ class TestProcessDvStream(unittest.TestCase): def test_init(self) -> None: - """Tests instantiation of AedatDataLoader.""" + """Tests instantiation of DvStream.""" stream = DvStream(address="127.0.0.1", port=7777, + shape_frame_in=(35, 35), shape_out=(43200,), additional_kwarg=5) @@ -32,21 +33,42 @@ def test_init(self) -> None: self.assertEqual(stream.out_port.shape, (43200,)) self.assertEqual(stream.proc_params["additional_kwarg"], 5) - def test_invalid_shape_throws_exception(self) -> None: + def test_invalid_out_shape_throws_exception(self) -> None: """Tests whether a shape that is invalid (not one-dimensional) throws an exception.""" invalid_shape = (240, 180) with(self.assertRaises(ValueError)): DvStream(address="127.0.0.1", port=7777, + shape_frame_in=(35, 35), shape_out=invalid_shape) + def test_invalid_in_shape_throws_exception(self) -> None: + """Tests whether a shape that is invalid (not two-dimensional) throws + an exception.""" + invalid_in_shape = (240,) + with(self.assertRaises(ValueError)): + DvStream(address="127.0.0.1", + port=7777, + shape_frame_in=invalid_in_shape, + shape_out=(43200,)) + + def test_negative_frame_size_throws_exception(self) -> None: + """Tests whether a shape with a negative size throws an exception.""" + invalid_shape = (-35,-35) + with(self.assertRaises(ValueError)): + DvStream(address="127.0.0.1", + port=7777, + shape_frame_in=invalid_shape, + shape_out=(43200,)) + def test_negative_size_throws_exception(self) -> None: """Tests whether a shape with a negative size throws an exception.""" invalid_shape = (-43200,) with(self.assertRaises(ValueError)): DvStream(address="127.0.0.1", port=7777, + shape_frame_in=(35, 
35), shape_out=invalid_shape) def test_negative_port_throws_exception(self) -> None: @@ -56,6 +78,7 @@ def test_negative_port_throws_exception(self) -> None: with(self.assertRaises(ValueError)): DvStream(address="127.0.0.1", port=invalid_port, + shape_frame_in=(35, 35), shape_out=(43200,)) def test_port_out_of_range_throws_exception(self) -> None: @@ -66,6 +89,7 @@ def test_port_out_of_range_throws_exception(self) -> None: with(self.assertRaises(ValueError)): DvStream(address="127.0.0.1", port=invalid_port, + shape_frame_in=(35, 35), shape_out=(43200,)) def test_address_empty_string_throws_exception(self) -> None: @@ -74,6 +98,7 @@ def test_address_empty_string_throws_exception(self) -> None: with(self.assertRaises(ValueError)): DvStream(address=invalid_address, port=7777, + shape_frame_in=(35, 35), shape_out=(43200,)) @@ -85,6 +110,7 @@ class RecvSparse(AbstractProcess): shape: tuple Shape of the InPort and Vars. """ + def __init__(self, shape: ty.Tuple[int]) -> None: super().__init__(shape=shape) @@ -114,22 +140,21 @@ def run_spk(self) -> None: pad_width=(0, self.in_port.shape[0] - data.shape[0])) -class MockPacketInput: - def __next__(self): - return { - "x": 35, - "y": 35, - "polarity": 0, - } - - class TestProcessModelDvStream(unittest.TestCase): def setUp(self) -> None: + self._mock_packet_input = ({"x": np.asarray([35, 32, 33]), "y": np.asarray([35, 32, 31]), + "polarity": np.asarray([0, 0, 0])}, + {"x": np.asarray([35, 32, 33]), "y": np.asarray([35, 32, 31]), + "polarity": np.asarray([0, 0, 0])}, + {"x": np.asarray([35, 32, 33]), "y": np.asarray([35, 32, 31]), + "polarity": np.asarray([0, 0, 0])}) + self.proc_params = { "address": "127.0.0.1", "port": 7777, + "shape_frame_in": (40, 40), "shape_out": (43200,), - "event_stream": MockPacketInput() + "event_stream": iter(self._mock_packet_input) } def test_init(self) -> None: @@ -137,21 +162,55 @@ def test_init(self) -> None: pm = DvStreamPM(proc_params=self.proc_params) self.assertIsInstance(pm, DvStreamPM) - def test_run_spk(self) -> None: + def test_run_spk_without_subsampling(self) -> None: + """ Test that run_spk works as expected when no subsampling is needed.""" max_num_events = 15 shape = (max_num_events,) - + shape_frame_in = (40,40) dv_stream = DvStream(address="127.0.0.1", port=7777, shape_out=shape, - event_stream=MockPacketInput()) + shape_frame_in=shape_frame_in, + event_stream=iter(self._mock_packet_input)) recv_sparse = RecvSparse(shape=shape) dv_stream.out_port.connect(recv_sparse.in_port) - dv_stream.run(condition=RunSteps(num_steps=1), run_cfg=Loihi1SimCfg()) + for mock_package in self._mock_packet_input: + dv_stream.run(condition=RunSteps(num_steps=1), run_cfg=Loihi1SimCfg()) + + expected_data = mock_package["polarity"] + expected_indices = np.ravel_multi_index((mock_package["x"], mock_package["y"]), shape_frame_in) + + sent_and_received_data = \ + recv_sparse.data.get()[:expected_data.shape[0]] + sent_and_received_indices = \ + recv_sparse.idx.get()[:expected_indices.shape[0]] + + np.testing.assert_equal(sent_and_received_data, + expected_data) + np.testing.assert_equal(sent_and_received_indices, + expected_indices) dv_stream.stop() + def test_run_spk_with_no_next(self) -> None: + """ Test that warning is raised when no events are arriving.""" + # TODO: Check whether warning arrives + # with (self.assertWarns(UserWarning)): + max_num_events = 15 + shape = (max_num_events,) + + dv_stream = DvStream(address="127.0.0.1", + port=7777, + shape_out=shape, + shape_frame_in=(40, 40), + 
event_stream=iter(self._mock_packet_input)) + recv_sparse = RecvSparse(shape=shape) + + dv_stream.out_port.connect(recv_sparse.in_port) + dv_stream.run(condition=RunSteps(num_steps=len(self._mock_packet_input)+1), run_cfg=Loihi1SimCfg()) + dv_stream.stop() if __name__ == '__main__': unittest.main() + From 6d8a4dccdf5b37d91ba2adda6fdda9a1b87d6c6e Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Fri, 9 Dec 2022 16:31:18 +0100 Subject: [PATCH 26/32] updated dv_stream + tests --- src/lava/proc/event_data/io/aedat_stream.py | 5 +- src/lava/proc/event_data/io/dv_stream.py | 13 +- src/lava/utils/events.py | 18 +- .../lava/proc/event_data/io/test_dv_stream.py | 227 +++++++++++++++--- 4 files changed, 215 insertions(+), 48 deletions(-) diff --git a/src/lava/proc/event_data/io/aedat_stream.py b/src/lava/proc/event_data/io/aedat_stream.py index a5ee58234..ff1473d54 100644 --- a/src/lava/proc/event_data/io/aedat_stream.py +++ b/src/lava/proc/event_data/io/aedat_stream.py @@ -16,7 +16,7 @@ from lava.magma.core.resources import CPU from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import PyLoihiProcessModel -from lava.utils.events import sub_sample +from lava.utils.events import sub_sample, encode_data_and_indices class AedatStream(AbstractProcess): @@ -116,7 +116,8 @@ def run_spk(self) -> None: """ events = self._get_next_event_batch() - data, indices = self._encode_data_and_indices(events) + data, indices = encode_data_and_indices(frame_shape=self._frame_shape, + events=events) # If we have more data than our shape allows, sub-sample if data.shape[0] > self._shape_out[0]: diff --git a/src/lava/proc/event_data/io/dv_stream.py b/src/lava/proc/event_data/io/dv_stream.py index 7ed5e08fb..52e73d5c0 100644 --- a/src/lava/proc/event_data/io/dv_stream.py +++ b/src/lava/proc/event_data/io/dv_stream.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ +import numpy as np import typing as ty import warnings from dv import NetworkNumpyEventPacketInput @@ -14,9 +15,7 @@ from lava.magma.core.process.process import AbstractProcess from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -import numpy as np - -from lava.utils.events import sub_sample +from lava.utils.events import sub_sample, encode_data_and_indices class DvStream(AbstractProcess): @@ -26,11 +25,13 @@ def __init__(self, port: int, shape_frame_in: ty.Tuple[int, int], shape_out: ty.Tuple[int], + seed_sub_sampling: ty.Optional[int] = 0, **kwargs) -> None: super().__init__(address=address, port=port, shape_out=shape_out, shape_frame_in=shape_frame_in, + seed_sub_sampling=seed_sub_sampling, **kwargs) self._validate_address(address) self._validate_port(port) @@ -88,6 +89,7 @@ def __init__(self, proc_params: dict) -> None: self._port = proc_params["port"] self._shape_out = proc_params["shape_out"] self._frame_shape = proc_params["shape_frame_in"] + self._seed_sub_sampling = proc_params["seed_sub_sampling"] self._event_stream = proc_params.get("event_stream") if not self._event_stream: self._event_stream = NetworkNumpyEventPacketInput( @@ -107,7 +109,8 @@ def run_spk(self) -> None: indices = np.empty(self._shape_out) warnings.warn("no events received") else: - data, indices = self._encode_data_and_indices(events) + data, indices = encode_data_and_indices(self._frame_shape, + events) # If we have more data than our shape allows, subsample # if data.shape[0] > self._shape_out[0]: # data, indices = 
sub_sample(data, indices, @@ -133,7 +136,7 @@ def _encode_data_and_indices(self, -> ty.Tuple[np.ndarray, np.ndarray]: """ Extracts the polarity data, and x and y indices from the given - batch of events, and encodes them accordingly. + batch of events, and encodes them using C-style encoding. """ xs, ys, ps = events['x'], events['y'], events['polarity'] data = ps diff --git a/src/lava/utils/events.py b/src/lava/utils/events.py index c0bee702b..c07481bb5 100644 --- a/src/lava/utils/events.py +++ b/src/lava/utils/events.py @@ -10,13 +10,25 @@ def sub_sample(data: np.ndarray, indices: np.ndarray, max_events: int, random_rng: ty.Optional[np.random.Generator] = None) \ - -> ty.Tuple[np.ndarray, np.ndarray]: - + -> ty.Tuple[np.ndarray, np.ndarray]: data_idx_array = np.arange(0, data.shape[0]) sampled_idx = random_rng.choice(data_idx_array, max_events, replace=False) sampled_idx = np.sort(sampled_idx) - return data[sampled_idx], indices[sampled_idx] + + +def encode_data_and_indices(frame_shape: ty.Tuple, + events: np.ndarray) \ + -> ty.Tuple[np.ndarray, np.ndarray]: + """ + Extracts the polarity data, and x and y indices from the given + batch of events, and encodes them using C-style encoding. + """ + xs, ys, ps = events['x'], events['y'], events['polarity'] + data = ps + indices = np.ravel_multi_index((xs, ys), frame_shape) + + return data, indices diff --git a/tests/lava/proc/event_data/io/test_dv_stream.py b/tests/lava/proc/event_data/io/test_dv_stream.py index a95fa465c..6fbae6275 100644 --- a/tests/lava/proc/event_data/io/test_dv_stream.py +++ b/tests/lava/proc/event_data/io/test_dv_stream.py @@ -140,77 +140,228 @@ def run_spk(self) -> None: pad_width=(0, self.in_port.shape[0] - data.shape[0])) +class MockPacketInput: + def __init__(self, + mock_packets): + self._mock_packets = mock_packets + self.timestep = 0 + + def __next__(self): + if self.timestep < len(self._mock_packets): + packet = self._mock_packets[self.timestep] + self.timestep += 1 + + return packet + else: + raise StopIteration # TODO: change this to error from network... 
object + + @property + def mock_packets(self): + return self._mock_packets + class TestProcessModelDvStream(unittest.TestCase): - def setUp(self) -> None: - self._mock_packet_input = ({"x": np.asarray([35, 32, 33]), "y": np.asarray([35, 32, 31]), - "polarity": np.asarray([0, 0, 0])}, - {"x": np.asarray([35, 32, 33]), "y": np.asarray([35, 32, 31]), - "polarity": np.asarray([0, 0, 0])}, - {"x": np.asarray([35, 32, 33]), "y": np.asarray([35, 32, 31]), - "polarity": np.asarray([0, 0, 0])}) - - self.proc_params = { + def test_init(self) -> None: + """Tests instantiation of the DvStream PyProcModel.""" + mock_packets = ({"x": np.asarray([8, 12, 13]), "y": np.asarray([157, 148, 146]), + "polarity": np.asarray([0, 1, 0])}, + {"x": np.asarray([39]), "y": np.asarray([118]), + "polarity": np.asarray([1])}, + {"x": np.asarray([12, 10]), "y": np.asarray([163, 108]), + "polarity": np.asarray([1, 1])}) + + mock_packet_input = MockPacketInput(mock_packets) + + proc_params = { "address": "127.0.0.1", "port": 7777, - "shape_frame_in": (40, 40), + "shape_frame_in": (240, 180), "shape_out": (43200,), - "event_stream": iter(self._mock_packet_input) + "seed_sub_sampling": 0, + "event_stream": iter(mock_packet_input.mock_packets) } - def test_init(self) -> None: - """Tests instantiation of the DvStream PyProcModel.""" - pm = DvStreamPM(proc_params=self.proc_params) + pm = DvStreamPM(proc_params=proc_params) self.assertIsInstance(pm, DvStreamPM) def test_run_spk_without_subsampling(self) -> None: - """ Test that run_spk works as expected when no subsampling is needed.""" + """ + Tests that run_spk works as expected when no subsampling is needed. + """ + mock_packets = ({"x": np.asarray([8, 12, 13]), "y": np.asarray([157, 148, 146]), + "polarity": np.asarray([0, 1, 0])}, + {"x": np.asarray([39]), "y": np.asarray([118]), + "polarity": np.asarray([1])}, + {"x": np.asarray([12, 10]), "y": np.asarray([163, 108]), + "polarity": np.asarray([1, 1])}) + + mock_packet_input = MockPacketInput(mock_packets) + + # data and indices calculated from the mock packets + data_history = [ + [0, 1, 0], + [1], + [1, 1] + ] + indices_history = [ + [1597, 2308, 2486], + [7138], + [2323, 1908] + ] + max_num_events = 15 - shape = (max_num_events,) - shape_frame_in = (40,40) + shape_frame_in = (240, 180) dv_stream = DvStream(address="127.0.0.1", port=7777, - shape_out=shape, + shape_out=(max_num_events,), shape_frame_in=shape_frame_in, - event_stream=iter(self._mock_packet_input)) - recv_sparse = RecvSparse(shape=shape) + event_stream=iter(mock_packet_input.mock_packets)) + + recv_sparse = RecvSparse(shape=(max_num_events,)) dv_stream.out_port.connect(recv_sparse.in_port) - for mock_package in self._mock_packet_input: - dv_stream.run(condition=RunSteps(num_steps=1), run_cfg=Loihi1SimCfg()) + num_steps = 3 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) - expected_data = mock_package["polarity"] - expected_indices = np.ravel_multi_index((mock_package["x"], mock_package["y"]), shape_frame_in) + for i in range(num_steps): + dv_stream.run(condition=run_cnd, run_cfg=run_cfg) - sent_and_received_data = \ + expected_data = np.array(data_history[i]) + expected_indices = np.array(indices_history[i]) + + received_data = \ recv_sparse.data.get()[:expected_data.shape[0]] - sent_and_received_indices = \ + received_indices = \ recv_sparse.idx.get()[:expected_indices.shape[0]] - np.testing.assert_equal(sent_and_received_data, - expected_data) - np.testing.assert_equal(sent_and_received_indices, - expected_indices) + 
np.testing.assert_equal(received_data, expected_data) + np.testing.assert_equal(received_indices, expected_indices) + dv_stream.stop() - def test_run_spk_with_no_next(self) -> None: + def test_run_spk_with_empty_batch(self) -> None: """ Test that warning is raised when no events are arriving.""" - # TODO: Check whether warning arrives + # TODO: Add appropriate error in dv_stream when receiving an empty batch, catch the error + mock_packets = ({"x": np.asarray([8, 12, 13]), "y": np.asarray([157, 148, 146]), + "polarity": np.asarray([0, 1, 0])}, + {"x": np.asarray([39]), "y": np.asarray([118]), + "polarity": np.asarray([1])}, + {"x": np.asarray([12, 10]), "y": np.asarray([163, 108]), + "polarity": np.asarray([1, 1])}, + {"x": np.asarray([]), "y": np.asarray([]), + "polarity": np.asarray([])}) + + mock_packet_input = MockPacketInput(mock_packets) + + max_num_events = 15 + shape_frame_in = (240, 180) + dv_stream = DvStream(address="127.0.0.1", + port=7777, + shape_out=(max_num_events,), + shape_frame_in=shape_frame_in, + event_stream=iter(mock_packet_input.mock_packets)) + + recv_sparse = RecvSparse(shape=(max_num_events,)) + + dv_stream.out_port.connect(recv_sparse.in_port) + + num_steps = 4 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + for i in range(num_steps): + print(i) + dv_stream.run(condition=run_cnd, run_cfg=run_cfg) + + dv_stream.stop() + + def test_run_spk_with_no_batch(self) -> None: + """ Test that an exception is thrown when the event stream stops.""" + # TODO: Add functionality for this in dv_stream # with (self.assertWarns(UserWarning)): + mock_packets = ({"x": np.asarray([8, 12, 13]), "y": np.asarray([157, 148, 146]), + "polarity": np.asarray([0, 1, 0])}) + + mock_packet_input = MockPacketInput(mock_packets) + max_num_events = 15 - shape = (max_num_events,) + shape_frame_in = (240, 180) + dv_stream = DvStream(address="127.0.0.1", + port=7777, + shape_out=(max_num_events,), + shape_frame_in=shape_frame_in, + event_stream=iter(mock_packet_input.mock_packets)) + + recv_sparse = RecvSparse(shape=(max_num_events,)) + + dv_stream.out_port.connect(recv_sparse.in_port) + + num_steps = len(mock_packets) + print(num_steps) + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + for i in range(num_steps): + print(i) + dv_stream.run(condition=run_cnd, run_cfg=run_cfg) + + dv_stream.stop() + + def test_run_spk_with_sub_sampling(self): + mock_packets = ({"x": np.asarray([8, 12, 13, 13, 13, 9, 14, 14, 13, 13, 8, 9, 9, 13, 9]), + "y": np.asarray([157, 148, 146, 156, 158, 167, 122, 113, 149, 148, 156, + 109, 107, 160, 160]), + "polarity": np.asarray([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])}, + {"x": np.asarray([39]), + "y": np.asarray([118]), + "polarity": np.asarray([1])}) + + self.mock_packet_input = MockPacketInput(mock_packets) + + # data and indices calculated from the mock packets + expected_data = [ + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1] + ] + expected_indices = [ + [1597., 2486., 2496., 2498., 1787., 2633., 1729., 1727., 2500., 1780.], + [7138] + ] + + max_num_events = 10 + shape_frame_in = (240, 180) + seed_rng = 0 dv_stream = DvStream(address="127.0.0.1", port=7777, - shape_out=shape, - shape_frame_in=(40, 40), - event_stream=iter(self._mock_packet_input)) - recv_sparse = RecvSparse(shape=shape) + shape_out=(max_num_events,), + shape_frame_in=shape_frame_in, + event_stream=iter(self.mock_packet_input.mock_packets), + seed_sub_sampling=seed_rng) + + recv_sparse = RecvSparse(shape=(max_num_events,)) dv_stream.out_port.connect(recv_sparse.in_port) - 
dv_stream.run(condition=RunSteps(num_steps=len(self._mock_packet_input)+1), run_cfg=Loihi1SimCfg()) + + num_steps = 2 + run_cfg = Loihi1SimCfg() + run_cnd = RunSteps(num_steps=1) + + for i in range(num_steps): + dv_stream.run(condition=run_cnd, run_cfg=run_cfg) + + received_data = \ + recv_sparse.data.get()[:len(expected_data[i])] + received_indices = \ + recv_sparse.idx.get()[:len(expected_indices[i])] + + np.testing.assert_equal(received_data, expected_data[i]) + np.testing.assert_equal(received_indices, expected_indices[i]) + dv_stream.stop() + + if __name__ == '__main__': unittest.main() From 26171930e2ee6234da141bc9a5a815ff294ad555 Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Fri, 9 Dec 2022 16:32:47 +0100 Subject: [PATCH 27/32] moved encoding method into utils --- src/lava/utils/events.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/lava/utils/events.py b/src/lava/utils/events.py index c07481bb5..4a1fef9cc 100644 --- a/src/lava/utils/events.py +++ b/src/lava/utils/events.py @@ -23,10 +23,8 @@ def sub_sample(data: np.ndarray, def encode_data_and_indices(frame_shape: ty.Tuple, events: np.ndarray) \ -> ty.Tuple[np.ndarray, np.ndarray]: - """ - Extracts the polarity data, and x and y indices from the given - batch of events, and encodes them using C-style encoding. - """ + """Extracts the polarity data, and x and y indices from the given + batch of events, and encodes them using C-style encoding.""" xs, ys, ps = events['x'], events['y'], events['polarity'] data = ps indices = np.ravel_multi_index((xs, ys), frame_shape) From a6778a4f453dfa3f31c64df6e91477f28286f33f Mon Sep 17 00:00:00 2001 From: "Cohen-Dumani, Joshua" Date: Fri, 9 Dec 2022 18:58:16 +0100 Subject: [PATCH 28/32] sub-sampling and polishing other tests --- src/lava/proc/event_data/io/dv_stream.py | 17 +++++++++------ .../lava/proc/event_data/io/test_dv_stream.py | 21 +++++++++---------- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/src/lava/proc/event_data/io/dv_stream.py b/src/lava/proc/event_data/io/dv_stream.py index 52e73d5c0..0b178c0d4 100644 --- a/src/lava/proc/event_data/io/dv_stream.py +++ b/src/lava/proc/event_data/io/dv_stream.py @@ -90,6 +90,7 @@ def __init__(self, proc_params: dict) -> None: self._shape_out = proc_params["shape_out"] self._frame_shape = proc_params["shape_frame_in"] self._seed_sub_sampling = proc_params["seed_sub_sampling"] + self._random_rng = np.random.default_rng(self._seed_sub_sampling) self._event_stream = proc_params.get("event_stream") if not self._event_stream: self._event_stream = NetworkNumpyEventPacketInput( @@ -104,17 +105,20 @@ def run_spk(self) -> None: data is sub-sampled if necessary, and then sent out. """ events = self._get_next_event_batch() + # if we have not received a new batch if not events: data = np.empty(self._shape_out) indices = np.empty(self._shape_out) warnings.warn("no events received") + elif not events["data"]: + warnings.warn() else: data, indices = encode_data_and_indices(self._frame_shape, events) # If we have more data than our shape allows, subsample - # if data.shape[0] > self._shape_out[0]: - # data, indices = sub_sample(data, indices, - # self._shape_out[0], self._random_rng) + if data.shape[0] > self._shape_out[0]: + data, indices = sub_sample(data, indices, + self._shape_out[0], self._random_rng) self.out_port.send(data, indices) def _get_next_event_batch(self): @@ -126,9 +130,10 @@ def _get_next_event_batch(self): try: # If end of file, raises StopIteration error. 
events = self._event_stream.__next__() - # TODO add exact error that is thrown - except: - return None + except StopIteration: + # TODO: define expected behavior + raise StopIteration(f"No events received. Check that everything is well connected.") + # return None return events def _encode_data_and_indices(self, diff --git a/tests/lava/proc/event_data/io/test_dv_stream.py b/tests/lava/proc/event_data/io/test_dv_stream.py index 6fbae6275..a9af4cd78 100644 --- a/tests/lava/proc/event_data/io/test_dv_stream.py +++ b/tests/lava/proc/event_data/io/test_dv_stream.py @@ -147,13 +147,13 @@ def __init__(self, self.timestep = 0 def __next__(self): - if self.timestep < len(self._mock_packets): - packet = self._mock_packets[self.timestep] - self.timestep += 1 + # if self.timestep < len(self._mock_packets): + packet = self._mock_packets[self.timestep] + self.timestep += 1 - return packet - else: - raise StopIteration # TODO: change this to error from network... object + return packet + # else: + # raise StopIteration # TODO: change this to actual behavior from DV network input object @property def mock_packets(self): @@ -242,7 +242,7 @@ def test_run_spk_without_subsampling(self) -> None: def test_run_spk_with_empty_batch(self) -> None: """ Test that warning is raised when no events are arriving.""" - # TODO: Add appropriate error in dv_stream when receiving an empty batch, catch the error + # TODO: Add appropriate behavior in process mock_packets = ({"x": np.asarray([8, 12, 13]), "y": np.asarray([157, 148, 146]), "polarity": np.asarray([0, 1, 0])}, {"x": np.asarray([39]), "y": np.asarray([118]), @@ -278,10 +278,10 @@ def test_run_spk_with_empty_batch(self) -> None: def test_run_spk_with_no_batch(self) -> None: """ Test that an exception is thrown when the event stream stops.""" - # TODO: Add functionality for this in dv_stream + # TODO: Add behavior in dv_stream # with (self.assertWarns(UserWarning)): mock_packets = ({"x": np.asarray([8, 12, 13]), "y": np.asarray([157, 148, 146]), - "polarity": np.asarray([0, 1, 0])}) + "polarity": np.asarray([0, 1, 0])},) mock_packet_input = MockPacketInput(mock_packets) @@ -297,7 +297,7 @@ def test_run_spk_with_no_batch(self) -> None: dv_stream.out_port.connect(recv_sparse.in_port) - num_steps = len(mock_packets) + num_steps = len(mock_packets) + 1 print(num_steps) run_cfg = Loihi1SimCfg() run_cnd = RunSteps(num_steps=1) @@ -361,7 +361,6 @@ def test_run_spk_with_sub_sampling(self): dv_stream.stop() - if __name__ == '__main__': unittest.main() From ffac5f6cc929f9065f613c3a14ca022dbb813045 Mon Sep 17 00:00:00 2001 From: Mathis Richter Date: Fri, 9 Dec 2022 22:26:32 +0100 Subject: [PATCH 29/32] Reviewed AedatStream unit tests. 
Signed-off-by: Mathis Richter --- .../proc/event_data/io/test_aedat_stream.py | 349 ++++++------------ 1 file changed, 122 insertions(+), 227 deletions(-) diff --git a/tests/lava/proc/event_data/io/test_aedat_stream.py b/tests/lava/proc/event_data/io/test_aedat_stream.py index f37c31941..fca6fabf4 100644 --- a/tests/lava/proc/event_data/io/test_aedat_stream.py +++ b/tests/lava/proc/event_data/io/test_aedat_stream.py @@ -2,136 +2,75 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -from dv import AedatFile -from dv.AedatFile import _AedatFileEventNumpyPacketIterator import numpy as np import typing as ty import unittest +from pathlib import Path -from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.ports.ports import InPort -from lava.magma.core.process.variable import Var -from lava.magma.core.resources import CPU -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.magma.core.model.py.ports import PyInPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.run_conditions import RunSteps from lava.magma.core.run_configs import Loihi1SimCfg from lava.proc.event_data.io.aedat_stream import AedatStream, AedatStreamPM +from ..utils import RecvSparse -class RecvSparse(AbstractProcess): - """Process that receives arbitrary sparse data. - - Parameters - ---------- - shape: tuple - Shape of the InPort and Vars. - """ - def __init__(self, - shape: ty.Tuple[int]) -> None: - super().__init__(shape=shape) - - self.in_port = InPort(shape=shape) - - self.data = Var(shape=shape, init=np.zeros(shape, dtype=int)) - self.idx = Var(shape=shape, init=np.zeros(shape, dtype=int)) - - -@implements(proc=RecvSparse, protocol=LoihiProtocol) -@requires(CPU) -class PyRecvSparsePM(PyLoihiProcessModel): - """Receives sparse data from PyInPort and stores a padded version of - received data and indices in Vars.""" - in_port: PyInPort = LavaPyType(PyInPort.VEC_SPARSE, int) - - data: np.ndarray = LavaPyType(np.ndarray, int) - idx: np.ndarray = LavaPyType(np.ndarray, int) - - def run_spk(self) -> None: - data, idx = self.in_port.recv() - - self.data = np.pad(data, - pad_width=(0, self.in_port.shape[0] - data.shape[0])) - self.idx = np.pad(idx, - pad_width=(0, self.in_port.shape[0] - data.shape[0])) +current_directory = Path(__file__).resolve().parent +aedat_file_path = str(current_directory.parent / "dvs_recording.aedat4") class TestProcessAedatStream(unittest.TestCase): - def test_init(self): - """ - Tests instantiation of AedatStream. - """ - data_loader = AedatStream(file_path="../dvs_recording.aedat4", + def test_init(self) -> None: + """Tests instantiation of AedatStream.""" + data_loader = AedatStream(file_path=aedat_file_path, shape_out=(43200,)) self.assertIsInstance(data_loader, AedatStream) - self.assertEqual(data_loader.proc_params["file_path"], - "../dvs_recording.aedat4") - self.assertEqual(data_loader.proc_params["shape_out"], (43200,)) + self.assertEqual(data_loader.proc_params["file_path"], aedat_file_path) + self.assertEqual(data_loader.out_port.shape, (43200,)) - def test_unsupported_file_extension_throws_exception(self): - """ - Tests whether a file_path argument with an unsupported file extension - throws an exception. 
- """ + def test_unsupported_file_extension_throws_exception(self) -> None: + """Tests whether a file_path argument with an unsupported file + extension throws an exception.""" + unsupported_extension = "py" with(self.assertRaises(ValueError)): - AedatStream(file_path="test_aedat_data_loader.py", + AedatStream(file_path="test_file." + unsupported_extension, shape_out=(43200,)) - def test_missing_file_throws_exception(self): - """ - Tests whether an exception is thrown when a specified file does not exist. - """ + def test_missing_file_throws_exception(self) -> None: + """Tests whether an exception is thrown when a specified file does not + exist.""" with(self.assertRaises(FileNotFoundError)): AedatStream(file_path="missing_file.aedat4", shape_out=(43200,)) - def test_invalid_shape_throws_exception(self): - """ - Tests whether a shape_out argument with an invalid shape throws an exception. - """ + def test_invalid_shape_throws_exception(self) -> None: + """Tests whether an invalid shape_out (not one-dimensional) + throws an exception.""" + invalid_shape = (240, 180) with(self.assertRaises(ValueError)): - AedatStream(file_path="../dvs_recording.aedat4", - shape_out=(240, 180)) + AedatStream(file_path=aedat_file_path, + shape_out=invalid_shape) - def test_negative_size_throws_exception(self): - """ - Tests whether a shape_out argument with a negative size throws an exception. - """ + def test_negative_size_throws_exception(self) -> None: + """Tests whether shape_out with a negative size throws an exception.""" + invalid_size = -43200 with(self.assertRaises(ValueError)): - AedatStream(file_path="../dvs_recording.aedat4", - shape_out=(-43200,)) + AedatStream(file_path=aedat_file_path, + shape_out=(invalid_size,)) -# TODO: add doc strings class TestProcessModelAedatStream(unittest.TestCase): def test_init(self): - """ - Tests instantiation of the AedatStream process model. - """ + """Tests instantiation of the AedatStream ProcessModel.""" proc_params = { - "file_path": "../dvs_recording.aedat4", + "file_path": aedat_file_path, "shape_out": (3000,), "seed_sub_sampling": 0 } - pm = AedatStreamPM(proc_params) - self.assertIsInstance(pm, AedatStreamPM) - self.assertEqual(pm._shape_out, proc_params["shape_out"]) - self.assertIsInstance(pm._file, AedatFile) - self.assertIsInstance(pm._stream, - _AedatFileEventNumpyPacketIterator) - self.assertIsInstance(pm._frame_shape, tuple) - def test_run_without_sub_sampling(self): - """ - Tests whether running yields the expected behavior, given that the - user parameters are all correct. 
- """ + def test_streaming_from_aedat_file(self): + """Tests streaming from an aedat file.""" data_history = [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], @@ -149,42 +88,33 @@ def test_run_without_sub_sampling(self): [464] ] - seed_rng = 0 max_num_events = 15 - data_loader = AedatStream(file_path="../dvs_recording.aedat4", - shape_out=(max_num_events,), - seed_sub_sampling=seed_rng) + data_loader = AedatStream(file_path=aedat_file_path, + shape_out=(max_num_events,)) recv_sparse = RecvSparse(shape=(max_num_events,)) data_loader.out_port.connect(recv_sparse.in_port) num_steps = 5 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=1) - for i in range(num_steps): - data_loader.run(condition=run_cnd, run_cfg=run_cfg) + data_loader.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) expected_data = np.array(data_history[i]) expected_indices = np.array(indices_history[i]) - sent_and_received_data = \ - recv_sparse.data.get()[:expected_data.shape[0]] - sent_and_received_indices = \ - recv_sparse.idx.get()[:expected_indices.shape[0]] + received_data = recv_sparse.data.get()[:expected_data.shape[0]] + received_indices = recv_sparse.idx.get()[:expected_indices.shape[0]] - np.testing.assert_equal(sent_and_received_data, - expected_data) - np.testing.assert_equal(sent_and_received_indices, - expected_indices) + np.testing.assert_equal(received_data, expected_data) + np.testing.assert_equal(received_indices, expected_indices) data_loader.stop() - def test_sub_sampling(self): - """ - Tests whether we get the expected behavior when we set a max_num_events - that is smaller than the amount of events we receive in a given batch - (i.e. the process will sub-sample correctly). + def test_streaming_from_aedat_file_with_sub_sampling(self): + """Tests streaming from an aedat file when sub-sampling of the stream + becomes necessary. This is the case when the max_num_events is + smaller than the amount of events we receive in a given batch. 
""" expected_data = [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], @@ -193,163 +123,135 @@ def test_sub_sampling(self): [1, 1, 1, 1, 1, 1], [0] ] - expected_indices = [ - [1597., 2486., 2496., 2498., 1787., 2633., 1729., 1727., 2500., 1780.], + [1597, 2486, 2496, 2498, 1787, 2633, 1729, 1727, 2500, 1780], [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], - [7138., 2301., 2982., 1364., 1379., 1386., 1384., 1390., 2289., 1362.], + [7138, 2301, 2982, 1364, 1379, 1386, 1384, 1390, 2289, 1362], [1910, 1382, 1909, 1562, 1606, 1381], [464] ] - seed_rng = 0 max_num_events = 10 - data_loader = AedatStream(file_path="../dvs_recording.aedat4", + data_loader = AedatStream(file_path=aedat_file_path, shape_out=(max_num_events,), - seed_sub_sampling=seed_rng) + seed_sub_sampling=0) recv_sparse = RecvSparse(shape=(max_num_events,)) data_loader.out_port.connect(recv_sparse.in_port) num_steps = 5 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=1) - for i in range(num_steps): - data_loader.run(condition=run_cnd, run_cfg=run_cfg) + data_loader.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) - sent_and_received_data = \ - recv_sparse.data.get()[:len(expected_data[i])] - sent_and_received_indices = \ - recv_sparse.idx.get()[:len(expected_indices[i])] + received_data = recv_sparse.data.get()[:len(expected_data[i])] + received_indices = recv_sparse.idx.get()[:len(expected_indices[i])] - np.testing.assert_equal(sent_and_received_data, - expected_data[i]) - np.testing.assert_equal(sent_and_received_indices, - expected_indices[i]) + np.testing.assert_equal(received_data, expected_data[i]) + np.testing.assert_equal(received_indices, expected_indices[i]) data_loader.stop() - def test_sub_sampling_seed(self): - """ - Tests whether using different seeds does indeed result in different samples. - TODO: would testing on only 1 timestep be sufficient? - """ + def test_randomness_of_sub_sampling(self): + """Tests whether sub-sampling uses a random component to select the + events that are discarded. Using different seeds should result in + different sub-sampling.""" expected_indices_seed_0 = [ - [1597., 2486., 2496., 2498., 1787., 2633., 1729., 1727., 2500., 1780.], + [1597, 2486, 2496, 2498, 1787, 2633, 1729, 1727, 2500, 1780], [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], - [7138., 2301., 2982., 1364., 1379., 1386., 1384., 1390., 2289., 1362.], + [7138, 2301, 2982, 1364, 1379, 1386, 1384, 1390, 2289, 1362], [1910, 1382, 1909, 1562, 1606, 1381], [464] ] - expected_indices_seed_1 = [ - [1597., 2308., 2486., 2496., 2498., 2642., 2489., 2488., 1727., 2500.], + [1597, 2308, 2486, 2496, 2498, 2642, 2489, 2488, 1727, 2500], [1600, 1732, 2297, 1388, 2290, 2305, 3704, 3519, 1911], - [7138., 2301., 1601., 1364., 1379., 1386., 1384., 1390., 2289., 1401.], + [7138, 2301, 1601, 1364, 1379, 1386, 1384, 1390, 2289, 1401], [1910, 1382, 1909, 1562, 1606, 1381], [464] ] - sent_and_received_indices_1 = [] - sent_and_received_indices_2 = [] + received_indices_0 = [] + received_indices_1 = [] max_num_events = 10 - seed_rng_run_1 = 0 - seed_rng_run_2 = 1 - data_loader_1 = AedatStream(file_path="../dvs_recording.aedat4", + # Architecture with seed 0. 
+ data_loader_0 = AedatStream(file_path=aedat_file_path, shape_out=(max_num_events,), - seed_sub_sampling=seed_rng_run_1) - data_loader_2 = AedatStream(file_path="../dvs_recording.aedat4", - shape_out=(max_num_events,), - seed_sub_sampling=seed_rng_run_2) + seed_sub_sampling=0) + recv_sparse_0 = RecvSparse(shape=(max_num_events,)) + data_loader_0.out_port.connect(recv_sparse_0.in_port) + # Architecture with seed 1. + data_loader_1 = AedatStream(file_path=aedat_file_path, + shape_out=(max_num_events,), + seed_sub_sampling=1) recv_sparse_1 = RecvSparse(shape=(max_num_events,)) - recv_sparse_2 = RecvSparse(shape=(max_num_events,)) - data_loader_1.out_port.connect(recv_sparse_1.in_port) - data_loader_2.out_port.connect(recv_sparse_2.in_port) num_steps = 5 run_cfg = Loihi1SimCfg() run_cnd = RunSteps(num_steps=1) + # Run architecture with seed 0. for i in range(num_steps): - data_loader_1.run(condition=run_cnd, run_cfg=run_cfg) - - sent_and_received_indices_1.append \ - (recv_sparse_1.idx.get()[:len(expected_indices_seed_1[i])]) - - np.testing.assert_equal(sent_and_received_indices_1, - expected_indices_seed_0) - - data_loader_1.stop() + data_loader_0.run(condition=run_cnd, run_cfg=Loihi1SimCfg()) + idx = self._extract_idx(recv_sparse_0, expected_indices_seed_0, i) + received_indices_0.append(idx) + data_loader_0.stop() + # Run architecture with seed 1. for i in range(num_steps): - data_loader_2.run(condition=run_cnd, run_cfg=run_cfg) - - sent_and_received_indices_2.append \ - (recv_sparse_2.idx.get()[:len(expected_indices_seed_1[i])]) - - np.testing.assert_equal(sent_and_received_indices_2, - expected_indices_seed_1) - - data_loader_2.stop() + data_loader_1.run(condition=run_cnd, run_cfg=run_cfg) + idx = self._extract_idx(recv_sparse_1, expected_indices_seed_1, i) + received_indices_1.append(idx) + data_loader_1.stop() - def test_end_of_file(self): - """ - Tests whether we loop back to the beginning of the event stream when we reach - the end of the aedat4 file. The test file contains 27 time-steps. + # Indices from the individual runs should be as expected. + np.testing.assert_equal(received_indices_0, expected_indices_seed_0) + np.testing.assert_equal(received_indices_1, expected_indices_seed_1) + # Indices from the two runs should be different. + self.assertTrue(np.any(received_indices_0 != received_indices_1)) + + @staticmethod + def _extract_idx(recv_sparse: RecvSparse, + expected: ty.List[ty.List[int]], + time_step: int) -> ty.List[ty.List[int]]: + idx_array = recv_sparse.idx.get().astype(int) + idx_array_cropped = idx_array[:len(expected[time_step])] + return list(idx_array_cropped) + + def test_looping_over_end_of_file(self): + """Tests whether the stream loops back to the beginning of the aedat + file when reaching the end of the file. 
""" - data_time_steps_1_to_5 = [] - data_time_steps_28_to_32 = [] - indices_time_steps_1_to_5 = [] - indices_time_steps_28_to_32 = [] - - seed_rng = 0 max_num_events = 15 - data_loader = AedatStream(file_path="../dvs_recording.aedat4", + data_loader = AedatStream(file_path=aedat_file_path, shape_out=(max_num_events,), - seed_sub_sampling=seed_rng) + seed_sub_sampling=0) recv_sparse = RecvSparse(shape=(max_num_events,)) data_loader.out_port.connect(recv_sparse.in_port) - # Run parameters - num_steps = 32 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=1) + received_data = [] + received_indices = [] - # Running + num_steps = 32 for i in range(num_steps): - data_loader.run(condition=run_cnd, run_cfg=run_cfg) - # get data from the first 5 timesteps - if i in range(5): - data_time_steps_1_to_5.append \ - (recv_sparse.data.get()) - indices_time_steps_1_to_5.append \ - (recv_sparse.idx.get()) - - # get data from timesteps 28-32 - if i in range(27, 32): - data_time_steps_28_to_32.append \ - (recv_sparse.data.get()) - indices_time_steps_28_to_32.append \ - (recv_sparse.idx.get()) - - np.testing.assert_equal(data_time_steps_1_to_5, - data_time_steps_28_to_32) - np.testing.assert_equal(indices_time_steps_1_to_5, - indices_time_steps_28_to_32) - - # Stopping + data_loader.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) + received_data.append(recv_sparse.data.get()) + received_indices.append(recv_sparse.idx.get()) data_loader.stop() + # The test file contains 27 time-steps. It is expected that the + # stream returns to the first entry in the 28th time step. + np.testing.assert_equal(received_data[27], received_data[0]) + np.testing.assert_equal(received_indices[27], received_indices[0]) + def test_index_encoding(self): - """ - Tests whether indices are correctly calculated during the process. - TODO: have less timesteps? maybe 2? (show it works for multiple timesteps with multiple sizes)? 
no difference in runtime - """ + """Tests whether indices are correctly converted from (x,y) format to + a linear index.""" x_history = [ [8, 12, 13, 13, 13, 9, 14, 14, 13, 13, 8, 9, 9, 13, 9], [8, 9, 12, 7, 12, 12, 20, 19, 10], @@ -374,40 +276,33 @@ def test_index_encoding(self): [172], [109] ] - seed_rng = 0 - rng = np.random.default_rng(seed=seed_rng) dense_shape = (240, 180) max_num_events = 15 - data_loader = AedatStream(file_path="../dvs_recording.aedat4", + data_loader = AedatStream(file_path=aedat_file_path, shape_out=(max_num_events,), - seed_sub_sampling=seed_rng) + seed_sub_sampling=0) recv_sparse = RecvSparse(shape=(max_num_events,)) data_loader.out_port.connect(recv_sparse.in_port) - # Run parameters num_steps = 9 - run_cfg = Loihi1SimCfg() - run_cnd = RunSteps(num_steps=1) - - # Running for i in range(num_steps): - data_loader.run(condition=run_cnd, run_cfg=run_cfg) + data_loader.run(condition=RunSteps(num_steps=1), + run_cfg=Loihi1SimCfg()) expected_xs = np.array(x_history[i]) expected_ys = np.array(y_history[i]) - sent_and_received_indices = \ + received_indices = \ recv_sparse.idx.get()[:expected_xs.shape[0]].astype(int) reconstructed_xs, reconstructed_ys = \ - np.unravel_index(sent_and_received_indices, dense_shape) + np.unravel_index(received_indices, dense_shape) np.testing.assert_equal(reconstructed_xs, expected_xs) np.testing.assert_equal(reconstructed_ys, expected_ys) - # Stopping data_loader.stop() From 5a8ceb21a61b6a7adf26f7a398a14c0e5154f4cc Mon Sep 17 00:00:00 2001 From: Mathis Richter Date: Fri, 9 Dec 2022 22:27:02 +0100 Subject: [PATCH 30/32] Added missing empty line. Signed-off-by: Mathis Richter --- tests/lava/proc/event_data/io/test_dv_stream.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/lava/proc/event_data/io/test_dv_stream.py b/tests/lava/proc/event_data/io/test_dv_stream.py index a95fa465c..c52e7f651 100644 --- a/tests/lava/proc/event_data/io/test_dv_stream.py +++ b/tests/lava/proc/event_data/io/test_dv_stream.py @@ -211,6 +211,7 @@ def test_run_spk_with_no_next(self) -> None: dv_stream.run(condition=RunSteps(num_steps=len(self._mock_packet_input)+1), run_cfg=Loihi1SimCfg()) dv_stream.stop() + if __name__ == '__main__': unittest.main() From 20a85bf0efcad11128c2bd11bf7a26974c20b987 Mon Sep 17 00:00:00 2001 From: SveaMeyer13 Date: Thu, 15 Dec 2022 17:23:04 +0100 Subject: [PATCH 31/32] ^testtesttest --- src/lava/proc/event_data/io/dv_stream.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/lava/proc/event_data/io/dv_stream.py b/src/lava/proc/event_data/io/dv_stream.py index 0b178c0d4..ecda68eae 100644 --- a/src/lava/proc/event_data/io/dv_stream.py +++ b/src/lava/proc/event_data/io/dv_stream.py @@ -105,13 +105,15 @@ def run_spk(self) -> None: data is sub-sampled if necessary, and then sent out. """ events = self._get_next_event_batch() + print("pieps") + print(events) # if we have not received a new batch - if not events: + if events is None: data = np.empty(self._shape_out) indices = np.empty(self._shape_out) warnings.warn("no events received") - elif not events["data"]: - warnings.warn() + # elif not events["data"]: + # warnings.warn() else: data, indices = encode_data_and_indices(self._frame_shape, events) From b3584624c9c9c36060c6c0afa2d204bc3aafca36 Mon Sep 17 00:00:00 2001 From: SveaMeyer13 Date: Fri, 16 Dec 2022 09:27:30 +0100 Subject: [PATCH 32/32] Revert "^testtesttest" This reverts commit 20a85bf0efcad11128c2bd11bf7a26974c20b987. 
--- src/lava/proc/event_data/io/dv_stream.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/lava/proc/event_data/io/dv_stream.py b/src/lava/proc/event_data/io/dv_stream.py index ecda68eae..0b178c0d4 100644 --- a/src/lava/proc/event_data/io/dv_stream.py +++ b/src/lava/proc/event_data/io/dv_stream.py @@ -105,15 +105,13 @@ def run_spk(self) -> None: data is sub-sampled if necessary, and then sent out. """ events = self._get_next_event_batch() - print("pieps") - print(events) # if we have not received a new batch - if events is None: + if not events: data = np.empty(self._shape_out) indices = np.empty(self._shape_out) warnings.warn("no events received") - # elif not events["data"]: - # warnings.warn() + elif not events["data"]: + warnings.warn() else: data, indices = encode_data_and_indices(self._frame_shape, events)
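
The reviewed EventsToFrame tests above build their expected frames by scattering polarities into a zeroed array. The snippet below is a minimal, self-contained sketch of that sparse-to-dense conversion; the function name events_to_frame and the channel convention for binary output (polarity 0 written to channel 0, polarity 1 to channel 1) are assumptions for illustration, mirroring what the unit tests check rather than quoting the ProcessModel implementation.

import numpy as np
import typing as ty


def events_to_frame(data: np.ndarray,
                    indices: np.ndarray,
                    shape_out: ty.Tuple[int, int, int]) -> np.ndarray:
    """Scatter sparsely represented events into a dense (W, H, C) frame.

    With C == 1 the polarity is written directly into the single channel;
    with C == 2 a 1 is written into the channel given by the polarity
    (0 for negative events, 1 for positive events)."""
    frame = np.zeros(shape_out, dtype=int)
    # Recover (x, y) coordinates from the C-style flat indices.
    xs, ys = np.unravel_index(indices, shape_out[:2])
    if shape_out[2] == 1:
        frame[xs, ys, 0] = data
    else:
        frame[xs, ys, data] = 1
    return frame


# Example mirroring test_convert_binary_polarity_events_to_frame:
data = np.array([1, 0, 1, 0, 1, 0])
xs = [0, 1, 2, 1, 2, 4]
ys = [0, 2, 1, 5, 7, 7]
indices = np.ravel_multi_index((xs, ys), (8, 8))
frame = events_to_frame(data, indices, shape_out=(8, 8, 2))
assert frame[0, 0, 1] == 1 and frame[1, 2, 0] == 1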
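
The encode_data_and_indices and sub_sample helpers moved into lava.utils.events in this series do the per-packet work for both AedatStream and DvStream. The standalone sketch below reproduces their behaviour on one of the mocked DV packets from the tests; the packet dict layout and the 240x180 frame shape are taken from those tests, and the code is an illustrative sketch rather than the installed library module.

import numpy as np
import typing as ty


def encode_data_and_indices(frame_shape: ty.Tuple[int, int],
                            events: dict) -> ty.Tuple[np.ndarray, np.ndarray]:
    """Return (polarities, flat C-style indices) for an (x, y, polarity) packet."""
    xs, ys, ps = events["x"], events["y"], events["polarity"]
    return ps, np.ravel_multi_index((xs, ys), frame_shape)


def sub_sample(data: np.ndarray,
               indices: np.ndarray,
               max_events: int,
               rng: np.random.Generator) -> ty.Tuple[np.ndarray, np.ndarray]:
    """Randomly keep at most max_events events, preserving their original order."""
    keep = np.sort(rng.choice(np.arange(data.shape[0]), max_events,
                              replace=False))
    return data[keep], indices[keep]


packet = {"x": np.array([8, 12, 13]),
          "y": np.array([157, 148, 146]),
          "polarity": np.array([0, 1, 0])}
data, indices = encode_data_and_indices((240, 180), packet)
# indices -> [1597, 2308, 2486] for a 240x180 frame, matching the test fixtures.
rng = np.random.default_rng(seed=0)
if data.shape[0] > 2:
    # Capacity of 2 chosen only to trigger sub-sampling in this example.
    data, indices = sub_sample(data, indices, 2, rng)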
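
Several TODOs above leave open what should happen when the event stream runs dry: the AedatStream tests expect the stream to loop back to the first packet, while the DvStream ProcessModel currently re-raises StopIteration. One possible way to obtain the looping behaviour from any finite packet iterator is a small wrapper that recreates the underlying stream when it is exhausted. The class name and the reopen callback below are illustrative assumptions, not code from the patch series.

import typing as ty


class LoopingPacketStream:
    """Iterator that restarts itself whenever the wrapped stream ends."""

    def __init__(self, make_stream: ty.Callable[[], ty.Iterator]) -> None:
        self._make_stream = make_stream
        self._stream = make_stream()

    def __iter__(self) -> "LoopingPacketStream":
        return self

    def __next__(self):
        try:
            return next(self._stream)
        except StopIteration:
            # End of recording: reopen the source and return its first packet.
            self._stream = self._make_stream()
            return next(self._stream)


# Usage with packets shaped like the mocked DV packets from the tests:
packets = ({"x": [8], "y": [157], "polarity": [0]},
           {"x": [39], "y": [118], "polarity": [1]})
stream = LoopingPacketStream(lambda: iter(packets))
first = next(stream)
for _ in range(len(packets)):
    _ = next(stream)               # steps past the end of the recording ...
assert next(stream) == packets[1]  # ... and keeps looping from the start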