Commit

Merge pull request #14 from Vivswan/master
v1.0.0rc6
Vivswan committed Jan 14, 2023
2 parents e120f7d + 94a09b7 commit 34f0a20
Showing 68 changed files with 684 additions and 308 deletions.
3 changes: 3 additions & 0 deletions .flake8
@@ -5,7 +5,10 @@ extend-ignore =
C101, # Coding magic comment
D100, # Missing docstring in public module
D104, # Missing docstring in public package
D202, # No blank lines allowed after function docstring
D401, # First line should be in imperative mood
R504, # unnecessary variable assignment before return statement
R505, # unnecessary else after return statement

per-file-ignores =
sample_code.py: D100, D101, D102, D103, D104
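For context, D202 ("No blank lines allowed after function docstring") is added to the ignore list because this release inserts a blank line after every docstring, as the hunks below show. A minimal illustration of the pattern flake8 would otherwise flag; the function name is hypothetical:

def scale(x: float, factor: float = 2.0) -> float:
    """Return x multiplied by factor."""

    # The blank line above this comment is what D202 would normally report.
    return x * factor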
1 change: 1 addition & 0 deletions .github/CODEOWNERS
@@ -0,0 +1 @@
* @vivswan
10 changes: 5 additions & 5 deletions .readthedocs.yaml
@@ -19,8 +19,8 @@ sphinx:
formats: all
# Optionally declare the Python requirements required to build your docs
python:
-install:
-- method: pip
-path: .
-extra_requirements:
-- doc
+install:
+- method: pip
+path: .
+extra_requirements:
+- doc
12 changes: 10 additions & 2 deletions analogvnn/__init__.py
@@ -1,11 +1,19 @@
import sys

-import importlib_metadata
+if sys.version_info[:2] >= (3, 8):
+    from importlib import metadata
+else:
+    import importlib_metadata as metadata  # pragma: no cover


__package__ = 'analogvnn'
-__version__ = importlib_metadata.version(__package__)
__author__ = 'Vivswan Shah (vivswanshah@pitt.edu)'

+try:
+    __version__ = metadata.version(__package__)
+except metadata.PackageNotFoundError:
+    __version__ = '0.0.0'

if sys.version_info < (3, 7, 0):
    import warnings

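For reference, a minimal sketch of the version-resolution pattern introduced above; the variable name and the print call are illustrative only:

import sys

if sys.version_info[:2] >= (3, 8):
    from importlib import metadata
else:
    import importlib_metadata as metadata

try:
    # Ask the installed distribution for its version string.
    version = metadata.version('analogvnn')
except metadata.PackageNotFoundError:
    # Importable from a source checkout but not installed as a package.
    version = '0.0.0'

print(version)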
5 changes: 5 additions & 0 deletions analogvnn/backward/BackwardFunction.py
@@ -27,6 +27,7 @@ def __init__(self, backward_function: Callable, layer: nn.Module = None):
backward_function (Callable): The function used to compute the backward gradient.
layer (nn.Module): The layer that this backward module is associated with.
"""

super(BackwardFunction, self).__init__(layer)
self._backward_function = backward_function

@@ -37,6 +38,7 @@ def backward_function(self) -> Callable:
Returns:
Callable: The function used to compute the backward gradient.
"""

return self._backward_function

@backward_function.setter
@@ -46,6 +48,7 @@ def backward_function(self, backward_function: Callable):
Args:
backward_function (Callable): The function used to compute the backward gradient with.
"""

self.set_backward_function(backward_function)

def set_backward_function(self, backward_function: Callable) -> BackwardFunction:
@@ -57,6 +60,7 @@ def set_backward_function(self, backward_function: Callable) -> BackwardFunction
Returns:
BackwardFunction: self.
"""

self._backward_function = backward_function
return self

@@ -73,6 +77,7 @@ def backward(self, *grad_output: Tensor, **grad_output_kwarg: Tensor) -> TENSORS
Raises:
NotImplementedError: If the backward function is not set.
"""

if self._backward_function is None:
raise ValueError('set backward_function first before invoking backward')

1 change: 1 addition & 0 deletions analogvnn/backward/BackwardIdentity.py
@@ -21,6 +21,7 @@ def backward(self, *grad_output: Tensor, **grad_output_kwarg: Tensor) -> TENSORS
Returns:
TENSORS: The gradients of the input of the layer.
"""

if len(grad_output) == 0:
return None

14 changes: 14 additions & 0 deletions analogvnn/backward/BackwardModule.py
@@ -50,6 +50,7 @@ def forward(ctx: Any, backward_module: BackwardModule, _: Tensor, *args: Tensor,
Returns:
TENSORS: The output of the function.
"""

ctx.backward_module = backward_module
return ctx.backward_module._call_impl_forward(*args, **kwargs)

@@ -64,6 +65,7 @@ def backward(ctx: Any, *grad_outputs: Tensor) -> Tuple[None, None, TENSORS]:
Returns:
TENSORS: The gradients of the input of the function.
"""

backward_module: BackwardModule = ctx.backward_module
results = backward_module._call_impl_backward(*grad_outputs)

@@ -78,6 +80,7 @@ def __init__(self, layer: nn.Module = None):
Args:
layer (nn.Module): The layer for which the backward gradient is computed.
"""

super(BackwardModule, self).__init__()
self._layer = None
self._set_autograd_backward()
@@ -97,6 +100,7 @@ def forward(self, *inputs: Tensor, **inputs_kwarg: Tensor) -> TENSORS:
Raises:
NotImplementedError: If the forward pass is not implemented.
"""

raise NotImplementedError(f'Module [{type(self).__name__}] is missing the required "forward" function')

forward._implemented = False
@@ -115,6 +119,7 @@ def backward(self, *grad_outputs: Tensor, **grad_output_kwarg: Tensor) -> TENSOR
Raises:
NotImplementedError: If the backward pass is not implemented.
"""

raise NotImplementedError(f'Module [{type(self).__name__}] is missing the required "backward" function')

def _call_impl_forward(self, *args: Tensor, **kwarg: Tensor) -> TENSORS:
@@ -127,6 +132,7 @@ def _call_impl_forward(self, *args: Tensor, **kwarg: Tensor) -> TENSORS:
Returns:
TENSORS: The output of the layer.
"""

return self.forward(*args, **kwarg)

def _call_impl_backward(self, *grad_output: Tensor, **grad_output_kwarg: Tensor) -> TENSORS:
@@ -139,6 +145,7 @@ def _call_impl_backward(self, *grad_output: Tensor, **grad_output_kwarg: Tensor)
Returns:
TENSORS: The gradients of the input of the layer.
"""

return self.backward(*grad_output, **grad_output_kwarg)

__call__: Callable[..., Any] = _call_impl_backward
@@ -155,6 +162,7 @@ def auto_apply(self, *args: Tensor, to_apply=True, **kwargs: Tensor) -> TENSORS:
Returns:
TENSORS: The output of the layer.
"""

if to_apply and not self._disable_autograd_backward:
return self._autograd_backward.apply(self, self._empty_holder_tensor, *args, **kwargs)
else:
@@ -166,6 +174,7 @@ def has_forward(self) -> bool:
Returns:
bool: True if the forward pass is implemented, False otherwise.
"""

return not hasattr(self.forward, '_implemented')

@property
@@ -175,6 +184,7 @@ def layer(self) -> Optional[nn.Module]:
Returns:
Optional[nn.Module]: layer
"""

return self.get_layer()

def get_layer(self) -> Optional[nn.Module]:
@@ -183,6 +193,7 @@ def get_layer(self) -> Optional[nn.Module]:
Returns:
Optional[nn.Module]: layer
"""

if isinstance(self, nn.Module):
return self
else:
@@ -202,6 +213,7 @@ def set_layer(self, layer: Optional[nn.Module]) -> BackwardModule:
ValueError: If the layer is already set.
ValueError: If the layer is not an instance of nn.Module.
"""

if isinstance(self, nn.Module):
raise ValueError('layer of Backward Module is set to itself')
if self._layer is not None:
@@ -237,6 +249,7 @@ def set_grad_of(tensor: Tensor, grad: Tensor) -> Optional[Tensor]:
Returns:
Optional[Tensor]: the gradient of the tensor.
"""

if tensor is None or tensor.requires_grad is False:
return None

@@ -267,6 +280,7 @@ def __getattr__(self, name: str) -> Any:
Raises:
AttributeError: If the attribute is not found.
"""

if isinstance(self, nn.Module) or self == self._layer:
return super(BackwardModule, self).__getattr__(name)
if not str(name).startswith('__') and self._layer is not None and hasattr(self._layer, name):
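For context, BackwardModule hooks its custom backward pass into autograd through a torch.autograd.Function subclass, as the forward(ctx, ...) / backward(ctx, ...) pair above suggests. A minimal self-contained sketch of that mechanism; the DoubleFn class is illustrative and not part of AnalogVNN:

import torch

class DoubleFn(torch.autograd.Function):
    """Toy example: y = 2 * x with a hand-written gradient."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)   # stash anything needed for the backward pass
        return 2 * x

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors   # retrieve what forward() stashed
        return 2 * grad_output     # d(2x)/dx = 2, chained with the incoming gradient

x = torch.ones(3, requires_grad=True)
DoubleFn.apply(x).sum().backward()
print(x.grad)  # tensor([2., 2., 2.])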
1 change: 1 addition & 0 deletions analogvnn/backward/BackwardUsingForward.py
@@ -21,4 +21,5 @@ def backward(self, *grad_output: Tensor, **grad_output_kwarg: Tensor) -> TENSORS
Returns:
TENSORS: The gradients of the input of the layer.
"""

return self._layer.forward(*grad_output, **grad_output_kwarg)
1 change: 1 addition & 0 deletions analogvnn/fn/dirac_delta.py
@@ -16,4 +16,5 @@ def dirac_delta(x: TENSOR_OPERABLE, a: TENSOR_OPERABLE = 0.001) -> TENSOR_OPERAB
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values equal to the Dirac delta function
of x.
"""

return 1 / (np.abs(a) * np.sqrt(np.pi)) * np.exp(-((x / a) ** 2))
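For reference, the expression above is the normalized-Gaussian approximation of the Dirac delta, exp(-(x/a)^2) / (|a| sqrt(pi)), which sharpens as a shrinks. A standalone sketch that restates the formula; the names are illustrative, not an import of the library function:

import numpy as np

def dirac_delta(x, a=0.001):
    # Normalized Gaussian: 1 / (|a| * sqrt(pi)) * exp(-(x / a) ** 2)
    return 1 / (np.abs(a) * np.sqrt(np.pi)) * np.exp(-((x / a) ** 2))

x = np.array([-0.01, -0.001, 0.0, 0.001, 0.01])
print(dirac_delta(x))  # sharply peaked at x = 0; the peak grows as |a| shrinks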
2 changes: 2 additions & 0 deletions analogvnn/fn/reduce_precision.py
@@ -18,6 +18,7 @@ def reduce_precision(x: TENSOR_OPERABLE, precision: TENSOR_OPERABLE, divide: TEN
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values rounded to the nearest
multiple of precision.
"""

x = x if isinstance(x, Tensor) else torch.tensor(x, requires_grad=False)
g: Tensor = x * precision
f = torch.sign(g) * torch.maximum(
@@ -38,6 +39,7 @@ def stochastic_reduce_precision(x: TENSOR_OPERABLE, precision: TENSOR_OPERABLE)
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values rounded to the
nearest multiple of precision.
"""

g: Tensor = x * precision
rand_x = torch.rand_like(g, requires_grad=False)

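For context, both helpers quantize values onto a grid with step 1/precision, deterministically or stochastically. A simplified sketch of the deterministic idea, assuming plain round-to-nearest rather than the library's exact sign/maximum/divide logic:

import torch

def reduce_precision_simplified(x: torch.Tensor, precision: int) -> torch.Tensor:
    # Snap each value to the nearest multiple of 1 / precision,
    # e.g. precision=4 keeps only {..., -0.25, 0.0, 0.25, 0.5, ...}.
    return torch.round(x * precision) / precision

x = torch.tensor([0.10, 0.30, 0.49, -0.62])
print(reduce_precision_simplified(x, precision=4))  # tensor([0.0000, 0.2500, 0.5000, -0.5000])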
1 change: 1 addition & 0 deletions analogvnn/fn/test.py
@@ -18,6 +18,7 @@ def test(model: nn.Module, test_loader: DataLoader, test_run: bool = False) -> T
Returns:
tuple: the loss and accuracy of the model on the test set.
"""

model.eval()
total_loss = 0
total_accuracy = 0
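For reference, a generic evaluation loop of the kind this helper implements, written under the usual PyTorch conventions; it is a sketch, not AnalogVNN's exact code, and the loss criterion is an assumption:

import torch
from torch import nn
from torch.utils.data import DataLoader

def evaluate(model: nn.Module, loader: DataLoader) -> tuple:
    model.eval()                      # disable dropout / use eval batch-norm statistics
    total_loss, correct, seen = 0.0, 0, 0
    criterion = nn.CrossEntropyLoss(reduction='sum')
    with torch.no_grad():             # no gradients needed during evaluation
        for inputs, targets in loader:
            outputs = model(inputs)
            total_loss += criterion(outputs, targets).item()
            correct += (outputs.argmax(dim=1) == targets).sum().item()
            seen += targets.size(0)
    return total_loss / seen, correct / seen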
1 change: 1 addition & 0 deletions analogvnn/fn/to_matrix.py
@@ -12,6 +12,7 @@ def to_matrix(tensor: Tensor) -> Tensor:
Returns:
Tensor: Tensor with the same values as the tensor, but with shape (1, -1).
"""

if len(tensor.size()) == 1:
return tensor.view(1, -1)
return tensor
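For context, a quick illustration of the reshaping behaviour above; the import path follows the module location shown in this diff:

import torch
from analogvnn.fn.to_matrix import to_matrix

v = torch.tensor([1.0, 2.0, 3.0])
print(to_matrix(v).shape)  # torch.Size([1, 3]) -- a 1-D tensor becomes a row matrix
m = torch.ones(2, 3)
print(to_matrix(m).shape)  # torch.Size([2, 3]) -- higher-rank tensors are returned unchanged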
1 change: 1 addition & 0 deletions analogvnn/fn/train.py
@@ -23,6 +23,7 @@ def train(
Returns:
tuple: the loss and accuracy of the model on the train set.
"""

model.train()
total_loss = 0.0
total_accuracy = 0
3 changes: 3 additions & 0 deletions analogvnn/graph/AccumulateGrad.py
@@ -29,6 +29,7 @@ def __init__(self, module: Union[nn.Module, Callable]):
Args:
module (Union[nn.Module, Callable]): Module from which to accumulate gradients.
"""

super(AccumulateGrad, self).__init__()
self.input_output_connections = {}
self.module = module
@@ -39,6 +40,7 @@ def __repr__(self):
Returns:
str: String representation of the module.
"""

return f'AccumulateGrad({self.module})'

def __call__( # noqa: C901
@@ -55,6 +57,7 @@ def __call__( # noqa: C901
Returns:
ArgsKwargs: The output gradients.
"""

grad_inputs_args = {}
grad_inputs_kwargs = {}
for key, grad_output in grad_outputs_args_kwargs.kwargs.items():
11 changes: 11 additions & 0 deletions analogvnn/graph/AcyclicDirectedGraph.py
@@ -49,6 +49,7 @@ def __init__(self, graph_state: ModelGraphState = None):
Raises:
NotImplementedError: If allow_loops is True, since this is not implemented yet.
"""

super(AcyclicDirectedGraph, self).__init__()
self.graph = nx.MultiDiGraph()
self.graph_state = graph_state
@@ -69,6 +70,7 @@ def __call__(self, *args, **kwargs):
Raises:
NotImplementedError: since method is abstract
"""

raise NotImplementedError

def add_connection(self, *args: GRAPH_NODE_TYPE):
@@ -80,6 +82,7 @@ def add_connection(self, *args: GRAPH_NODE_TYPE):
Returns:
AcyclicDirectedGraph: self.
"""

for i in range(1, len(args)):
self.add_edge(args[i - 1], args[i])
return self
@@ -106,6 +109,7 @@ def add_edge(
Returns:
AcyclicDirectedGraph: self.
"""

attr = self.check_edge_parameters(in_arg, in_kwarg, out_arg, out_kwarg)
existing_edges = self.graph.get_edge_data(u_of_edge, v_of_edge)

@@ -148,6 +152,7 @@ def check_edge_parameters(
Raises:
ValueError: If in and out parameters are invalid.
"""

# @@@ in_arg: None in_kwarg: None out_arg: None out_kwarg: None 0
# @@ in_arg: True in_kwarg: None out_arg: True out_kwarg: None 1
# in_arg: None in_kwarg: True out_arg: True out_kwarg: None 2
@@ -237,6 +242,7 @@ def _create_edge_label(
Returns:
str: The edge's label.
"""

label = ''
if in_arg == in_kwarg == out_arg == out_kwarg is True:
return '* -> *'
@@ -274,6 +280,7 @@ def compile(self, is_static: bool = True):
Raises:
ValueError: If the graph is not acyclic.
"""

for i in nx.simple_cycles(self.graph):
raise ValueError(f'Cycle detected: {i}')

@@ -292,6 +299,7 @@ def _reindex_out_args(graph: nx.MultiDiGraph) -> nx.MultiDiGraph:
Returns:
nx.MultiDiGraph: The graph with re-indexed output arguments.
"""

# noinspection PyTypeChecker
graph: nx.MultiDiGraph = graph.copy()

@@ -324,6 +332,7 @@ def _create_static_sub_graph(
Returns:
List[Tuple[GRAPH_NODE_TYPE, List[GRAPH_NODE_TYPE]]]: The static sub graph.
"""

if self._is_static and from_node in self._static_graphs:
return self._static_graphs[from_node]

@@ -358,6 +367,7 @@ def parse_args_kwargs( # noqa: C901
Returns:
ArgsKwargs: The arguments and keyword arguments.
"""

args = {}
extra_args = []
kwargs = {}
@@ -436,4 +446,5 @@ def render(self, *args, real_label: bool = False, **kwargs) -> str:
Returns:
str: The (possibly relative) path of the rendered file.
"""

return to_graphviz_digraph(self.graph, real_label=real_label).render(*args, **kwargs)
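For context, compile() rejects cyclic graphs via networkx's simple_cycles, as the hunk for compile() above shows. A minimal standalone illustration of that check; the example graph is made up:

import networkx as nx

graph = nx.MultiDiGraph()
graph.add_edges_from([('input', 'linear'), ('linear', 'relu'), ('relu', 'linear')])  # relu -> linear closes a loop

for cycle in nx.simple_cycles(graph):
    raise ValueError(f'Cycle detected: {cycle}')  # raises, e.g. Cycle detected: ['linear', 'relu']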
