Float2Fixed converter [Originally PR #399] #666

Closed · wants to merge 31 commits

Commits (31) · Changes from all commits
a427884
Squashed commit of the following:
ackurth-nc Oct 6, 2022
73b2cd6
Fix typos, change comment
ackurth-nc Oct 6, 2022
add0204
Update comment
ackurth-nc Oct 6, 2022
c9289d8
Disregard recording Procs, fix bug when certain var is 0, catch more …
ackurth-nc Oct 10, 2022
41e0454
Add example of shallow network
ackurth-nc Oct 11, 2022
96d55e3
Introduce precision data class; adapt converter, tests, proc models a…
ackurth-nc Oct 12, 2022
eb05f8c
Forgot ProcModels
ackurth-nc Oct 12, 2022
f36349a
Adapt converter
ackurth-nc Oct 12, 2022
3693e1c
Adapt lif
ackurth-nc Oct 12, 2022
e377d03
Add docstring to class
ackurth-nc Oct 12, 2022
40958fe
Remove falsely added code
ackurth-nc Oct 12, 2022
8092d31
Improve docstrings of tests
ackurth-nc Oct 12, 2022
0117a66
Move conversion var variable from global to class
ackurth-nc Oct 12, 2022
ffc5674
Correct grammatical error
ackurth-nc Oct 12, 2022
4a8c692
Rename rcfg to run_cfg
ackurth-nc Oct 12, 2022
024e597
Add specific windows step for poetry
mgkwill Oct 12, 2022
6aa6984
Merge branch 'main' into dev/f2f_conv
mgkwill Oct 12, 2022
f2ca84d
Fix windows poetry run command
mgkwill Oct 12, 2022
58fd18a
Address some reviews
ackurth-nc Oct 12, 2022
f955fba
Merge remote-tracking branch 'origin/dev/f2f_conv' into dev/f2f_conv
ackurth-nc Oct 12, 2022
7d43032
Merge remote-tracking branch 'upstream/main' into dev/f2f_conv
ackurth Oct 31, 2022
8dc7625
Renamed testing class
ackurth Oct 31, 2022
feb0b3a
Make dyn_range to tuple
ackurth Oct 31, 2022
d388bc2
Allow passing num_bits_exp 0 without error
ackurth Oct 31, 2022
d5ba6a7
Merge remote-tracking branch 'upstream/main' into dev/f2f_conv
ackurth Nov 17, 2022
dc000f8
Update test_tutorials.py
PhilippPlank Nov 17, 2022
81fc550
Adapt usage example in script for documentation
ackurth Nov 21, 2022
93722c7
Fix type in docstring
ackurth Nov 21, 2022
e29f938
Add plotting function for displaying distribution of parameters and q…
ackurth Nov 21, 2022
4768163
Merge branch 'main' into f2f_conv
Apr 12, 2023
b7d5929
Merge branch 'main' into f2f_conv
mgkwill Jul 17, 2023
12 changes: 12 additions & 0 deletions src/lava/magma/core/model/precision.py
@@ -0,0 +1,12 @@
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/
from dataclasses import dataclass


@dataclass
class Precision:
"""Precision information for floating- to fixed-point conversion."""
is_signed: bool = True
num_bits: int = None
implicit_shift: int = None
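
For orientation, a minimal usage sketch of the new dataclass; the concrete field values are illustrative and simply mirror settings used by the fixed-point models further down in this PR.

from lava.magma.core.model.precision import Precision

# A signed 24-bit representation with no implicit shift, e.g. for a neuron
# state variable such as u or v in the fixed-point LIF model.
state_precision = Precision(is_signed=True, num_bits=24, implicit_shift=0)

# An unsigned 1-bit representation, e.g. for a binary spike input.
spike_precision = Precision(is_signed=False, num_bits=1, implicit_shift=0)

print(state_precision.num_bits)   # 24
print(spike_precision.is_signed)  # False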
101 changes: 100 additions & 1 deletion src/lava/magma/core/model/py/type.py
@@ -4,7 +4,10 @@

import typing as ty
from dataclasses import dataclass
from lava.magma.core.model.precision import Precision
from lava.magma.core.model.py.ports import PyInPort, PyOutPort, PyRefPort
import numpy as np
import warnings


@dataclass
@@ -13,4 +16,100 @@
type, ty.Type[PyInPort], ty.Type[PyOutPort], ty.Type[PyRefPort]
]
d_type: type
precision: int = None # If None, infinite precision is assumed
    precision: Precision = None  # If None, infinite precision is assumed.
num_bits_exp: int = None # If None, fixed exponent in fixed-pt model.
exp_var: str = None # Name of Var in which exponent is stored, if needed.
domain: np.ndarray = None # If None, no a-priori knowledge of Var domain.
constant: bool = False # If True, indicates that Var is constant.
    meta_parameter: bool = False  # If True, indicates Var is a config var.
    # Indicates to which scale domain the Var belongs, i.e., with which
    # Vars its scaling needs to be consistent. Scale domains are identified
    # by integers: '0' indicates the global scale domain shared between all
    # Processes. Other integers are reserved for scale domains local to one
    # Process.
    # By default, all Vars are assumed to be in the same scale domain.
scale_domain: int = 0

# Members of LavaPyType needed for float- to fixed-point conversion.
conv_vars: tuple = ('num_bits_exp', 'domain', 'constant', 'scale_domain',
'exp_var', 'is_signed', 'num_bits', 'implicit_shift')

@staticmethod
def _validate_precision(precision):
"""Validates the format of the precision passed to the LavaPyType.

Raises
------
        Warning
            If precision is None, which might cause an error in the
            Float2Fixed conversion.
        TypeError
            If precision is not of type Precision or None.
        ValueError
            If a field of precision has the wrong type.
"""

if precision is None:
warnings.warn("'precision' is None: This might cause an error in"
+ " the Float2FixedPoint conversion.")
        elif not isinstance(precision, Precision):
raise TypeError("'precision' must be of type Precision or None"
+ f" but has type {type(precision)}.")
else:
            # Check if is_signed was correctly assigned.
            if not isinstance(precision.is_signed, bool):
                raise ValueError("'is_signed' has type "
                                 + f"{type(precision.is_signed)}, but must"
                                 + " be bool.")

            # Check if number of bits and implicit shift were set correctly.
            if not isinstance(precision.num_bits, int):
                raise ValueError("'num_bits' has type "
                                 + f"{type(precision.num_bits)}, but must"
                                 + " be int.")

            if not isinstance(precision.implicit_shift, int):
                raise ValueError("'implicit_shift' has type "
                                 + f"{type(precision.implicit_shift)}, but"
                                 + " must be int.")

    def _validate_exp_data(self):
        """Validates whether the data needed to split the variable into
        mantissa and exponent is complete.

Raises
------
ValueError
If exponent data is not complete
"""

if self.num_bits_exp and not self.exp_var:
raise ValueError("Provided number of bits for exponent but no"
+ " name for exponent variable.")
        if (self.exp_var and not self.num_bits_exp) and self.num_bits_exp != 0:
            # Evaluates to True if exp_var is given but num_bits_exp is None.
            # If num_bits_exp is greater than or equal to 0, the condition
            # evaluates to False.
            raise ValueError("Provided name for exponent variable but no"
                             + " number of bits for exponent.")

def conversion_data(self):
"""Get data for variables needed for float- to fixed-point conversion
defined in conv_vars.

Returns
-------
conv_data : dict
Conversion data dictionary, keys defined in conv_vars
"""
LavaPyType._validate_precision(self.precision)
self._validate_exp_data()

conv_data = {}

        for key in self.conv_vars:
            if key in ['is_signed', 'num_bits', 'implicit_shift']:
                conv_data[key] = getattr(self.precision, key)
            else:
                conv_data[key] = getattr(self, key)

return conv_data
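
To see how these pieces fit together, here is a hedged sketch of calling conversion_data() on a hypothetical Var description; the field values loosely mirror the Dense weights below and are not prescribed by the PR.

import numpy as np

from lava.magma.core.model.precision import Precision
from lava.magma.core.model.py.type import LavaPyType

# Hypothetical Var: signed 8-bit mantissa with a 3-bit exponent that is
# stored in a separate Var named 'weight_exp'.
weights = LavaPyType(np.ndarray, np.int32,
                     precision=Precision(is_signed=True,
                                         num_bits=8,
                                         implicit_shift=0),
                     num_bits_exp=3,
                     exp_var='weight_exp',
                     constant=True)

conv_data = weights.conversion_data()
# conv_data now holds one entry per name in conv_vars, e.g.:
# {'num_bits_exp': 3, 'domain': None, 'constant': True, 'scale_domain': 0,
#  'exp_var': 'weight_exp', 'is_signed': True, 'num_bits': 8,
#  'implicit_shift': 0}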
30 changes: 25 additions & 5 deletions src/lava/proc/dense/models.py
@@ -11,6 +11,7 @@
from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol
from lava.magma.core.model.py.ports import PyInPort, PyOutPort
from lava.magma.core.model.py.type import LavaPyType
from lava.magma.core.model.precision import Precision
from lava.magma.core.resources import CPU
from lava.magma.core.decorator import implements, requires, tag
from lava.magma.core.model.py.model import PyLoihiProcessModel
@@ -60,13 +61,32 @@ class AbstractPyDenseModelBitAcc(PyLoihiProcessModel):
it mimics Loihi behavior bit-by-bit.
"""

s_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, bool, precision=1)
a_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32, precision=16)
a_buff: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=16)
s_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, bool,
precision=Precision(is_signed=False,
num_bits=1,
implicit_shift=0))
a_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32,
precision=Precision(is_signed=True,
num_bits=16,
implicit_shift=0))
a_buff: np.ndarray = LavaPyType(np.ndarray, np.int32,
precision=Precision(is_signed=True,
num_bits=16,
implicit_shift=0))
# weights is a 2D matrix of form (num_flat_output_neurons,
# num_flat_input_neurons) in C-order (row major).
weights: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=8)
num_message_bits: np.ndarray = LavaPyType(np.ndarray, int, precision=5)
weights: np.ndarray = LavaPyType(np.ndarray, np.int32,
precision=Precision(is_signed=True,
num_bits=8,
implicit_shift=0),
num_bits_exp=3, constant=True,
exp_var='weight_exp')
num_message_bits: np.ndarray = LavaPyType(np.ndarray, int,
meta_parameter=True,
precision=Precision(
is_signed=False,
num_bits=5,
implicit_shift=0))

def __init__(self, proc_params):
super().__init__(proc_params)
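
The weights Var above is now described as a signed 8-bit mantissa plus a 3-bit exponent stored in the separate Var weight_exp. As a rough illustration of what such a split encodes (ignoring any additional hardware-specific scaling, which this diff does not spell out), the effective value is the mantissa scaled by a power of two; the helper below is a sketch, not part of the PR.

import numpy as np

def effective_value(mantissa: np.ndarray, exponent: int,
                    implicit_shift: int = 0) -> np.ndarray:
    """Illustrative reconstruction of a value stored as mantissa and
    exponent: value ~ mantissa * 2 ** (exponent + implicit_shift)."""
    return mantissa * 2.0 ** (exponent + implicit_shift)

# An 8-bit signed mantissa of 100 with weight_exp = 3 encodes roughly 800.
print(effective_value(np.array([100]), exponent=3))  # [800.]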
51 changes: 40 additions & 11 deletions src/lava/proc/lif/models.py
@@ -10,6 +10,7 @@
from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol
from lava.magma.core.model.py.ports import PyInPort, PyOutPort
from lava.magma.core.model.py.type import LavaPyType
from lava.magma.core.model.precision import Precision
from lava.magma.core.resources import CPU
from lava.magma.core.decorator import implements, requires, tag
from lava.magma.core.model.py.model import PyLoihiProcessModel
@@ -74,15 +75,38 @@ class AbstractPyLifModelFixed(PyLoihiProcessModel):
leaky-integrate-and-fire neuron model. Implementations like those
bit-accurate with Loihi hardware inherit from here.
"""

a_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int16, precision=16)
a_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int16,
precision=Precision(is_signed=True,
num_bits=16,
implicit_shift=6))
s_out: None # This will be an OutPort of different LavaPyTypes
u: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
v: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
du: int = LavaPyType(int, np.uint16, precision=12)
dv: int = LavaPyType(int, np.uint16, precision=12)
bias_mant: np.ndarray = LavaPyType(np.ndarray, np.int16, precision=13)
bias_exp: np.ndarray = LavaPyType(np.ndarray, np.int16, precision=3)
u: np.ndarray = LavaPyType(np.ndarray, np.int32,
precision=Precision(is_signed=True,
num_bits=24,
implicit_shift=0))
v: np.ndarray = LavaPyType(np.ndarray, np.int32,
precision=Precision(is_signed=True,
num_bits=24,
implicit_shift=0))
du: int = LavaPyType(int, np.uint16,
precision=Precision(is_signed=False,
num_bits=12,
implicit_shift=0),
scale_domain=1, domain=np.array([0, 1]),
constant=True)
dv: int = LavaPyType(int, np.uint16,
precision=Precision(is_signed=False,
num_bits=12,
implicit_shift=0),
scale_domain=1, domain=np.array([0, 1]),
constant=True)
bias_mant: np.ndarray = LavaPyType(np.ndarray, np.int16, constant=True,
precision=Precision(is_signed=True,
num_bits=13,
implicit_shift=0),
num_bits_exp=3, exp_var='bias_exp')
bias_exp: np.ndarray = LavaPyType(np.ndarray, np.int16,
meta_parameter=True)

def __init__(self, proc_params):
super(AbstractPyLifModelFixed, self).__init__(proc_params)
@@ -257,9 +281,14 @@ class PyLifModelBitAcc(AbstractPyLifModelFixed):
- vth: unsigned 17-bit integer (0 to 131071).

"""

s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32, precision=24)
vth: int = LavaPyType(int, np.int32, precision=17)
s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, np.int32,
precision=Precision(is_signed=True,
num_bits=24,
implicit_shift=0))
vth: int = LavaPyType(int, np.int32, constant=True,
precision=Precision(is_signed=False,
num_bits=17,
implicit_shift=6))

def __init__(self, proc_params):
super(PyLifModelBitAcc, self).__init__(proc_params)
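
vth in the bit-accurate model carries an implicit shift of 6, i.e. the stored 17-bit value represents a threshold in steps of 64. The sketch below shows one way a converter might map between float and mantissa under that convention; the rounding rule is an assumption, not taken from this PR.

def float_to_mantissa(value: float, implicit_shift: int) -> int:
    """Map a float to a mantissa such that value ~ mantissa * 2 ** shift.
    The rounding choice is illustrative."""
    return int(round(value / 2 ** implicit_shift))


def mantissa_to_float(mantissa: int, implicit_shift: int) -> float:
    """Inverse mapping: reconstruct the represented value."""
    return mantissa * 2.0 ** implicit_shift


vth_mant = float_to_mantissa(640.0, implicit_shift=6)   # 10
print(mantissa_to_float(vth_mant, implicit_shift=6))    # 640.0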
1 change: 1 addition & 0 deletions src/lava/proc/lif/process.py
@@ -43,6 +43,7 @@ def __init__(
**kwargs,
)

self.shape = shape
self.a_in = InPort(shape=shape)
self.s_out = OutPort(shape=shape)
self.u = Var(shape=shape, init=u)