Commit

Merge branch 'in_progess' into v1.0.0
Vivswan committed Jan 10, 2023
2 parents fbc32e2 + bceda05 commit 2cc9a34
Showing 22 changed files with 1,017 additions and 555 deletions.
4 changes: 3 additions & 1 deletion README.md
@@ -1,6 +1,8 @@
# AnalogVNN

[![arXiv](https://img.shields.io/badge/arXiv-2210.10048-orange.svg)](https://arxiv.org/abs/2210.10048)
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Vivswan/AnalogVNN/blob/v1.0.0/docs/_static/AnalogVNN_Demo.ipynb)

[![PyPI version](https://badge.fury.io/py/analogvnn.svg)](https://badge.fury.io/py/analogvnn)
[![Documentation Status](https://readthedocs.org/projects/analogvnn/badge/?version=stable)](https://analogvnn.readthedocs.io/en/stable/?badge=stable)
[![Python](https://img.shields.io/badge/python-3.7--3.11-blue)](https://badge.fury.io/py/analogvnn)
@@ -11,7 +13,7 @@ Documentation: [https://analogvnn.readthedocs.io/](https://analogvnn.readthedocs
## Installation:

- Install [PyTorch](https://pytorch.org/)
- Install AnanlogVNN using [pip](https://pypi.org/project/analogvnn/)
- Install AnalogVNN using [pip](https://pypi.org/project/analogvnn/)

```bash
pip install analogvnn
3 changes: 2 additions & 1 deletion analogvnn/fn/dirac_delta.py
@@ -13,6 +13,7 @@ def dirac_delta(x: TENSOR_OPERABLE, a: TENSOR_OPERABLE = 0.001) -> TENSOR_OPERAB
a (TENSOR_OPERABLE): standard deviation.
Returns:
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values equal to the Dirac delta function of x.
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values equal to the Dirac delta function
of x.
"""
return 1 / (np.abs(a) * np.sqrt(np.pi)) * np.exp(-((x / a) ** 2))
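
For context on the hunk above: `dirac_delta` is a Gaussian approximation of the Dirac delta that narrows and grows in height as `a` shrinks while keeping unit area. A minimal standalone sketch (illustration only, assuming just NumPy; not part of this commit's diff):

```python
import numpy as np


def dirac_delta(x, a=0.001):
    # Same formula as analogvnn.fn.dirac_delta: a Gaussian of width `a`
    # that integrates to ~1 and concentrates around x = 0 as `a` -> 0.
    return 1 / (np.abs(a) * np.sqrt(np.pi)) * np.exp(-((x / a) ** 2))


x = np.linspace(-0.05, 0.05, 100001)
dx = x[1] - x[0]
for a in (0.01, 0.001):
    peak = dirac_delta(0.0, a)
    area = dirac_delta(x, a).sum() * dx  # numerically close to 1
    print(f"a={a}: peak={peak:.1f}, area={area:.4f}")
```
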
6 changes: 4 additions & 2 deletions analogvnn/fn/reduce_precision.py
@@ -16,7 +16,8 @@ def reduce_precision(x: TENSOR_OPERABLE, precision: TENSOR_OPERABLE, divide: TEN
divide (TENSOR_OPERABLE): the number of bits to be reduced
Returns:
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values rounded to the nearest multiple of precision.
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values rounded to the nearest
multiple of precision.
"""
x = x if isinstance(x, Tensor) else torch.tensor(x, requires_grad=False)
g: Tensor = x * precision
@@ -36,7 +37,8 @@ def stochastic_reduce_precision(x: TENSOR_OPERABLE, precision: TENSOR_OPERABLE)
precision (TENSOR_OPERABLE): the precision of the quantization.
Returns:
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values rounded to the nearest multiple of precision.
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values rounded to the
nearest multiple of precision.
"""
g: Tensor = x * precision
rand_x = torch.rand_like(g, requires_grad=False)
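
For context on the two functions above: both snap `x * precision` onto integers and scale back, deterministically in `reduce_precision` and by unbiased stochastic rounding in `stochastic_reduce_precision`. A simplified sketch of the idea (illustration only; the actual analogvnn code also handles the `divide` split point and non-Tensor inputs):

```python
import torch


def reduce_precision_sketch(x: torch.Tensor, precision: float) -> torch.Tensor:
    # Deterministic quantization: round x * precision to an integer, scale back.
    return torch.round(x * precision) / precision


def stochastic_reduce_precision_sketch(x: torch.Tensor, precision: float) -> torch.Tensor:
    # Stochastic rounding: round up with probability equal to the fractional part,
    # so the quantized value equals x in expectation (shown here for x >= 0).
    g = x * precision
    return (torch.floor(g) + (torch.rand_like(g) < torch.frac(g)).float()) / precision


x = torch.tensor([0.12, 0.37, 0.51, 0.88])
print(reduce_precision_sketch(x, precision=4))
print(stochastic_reduce_precision_sketch(x, precision=4))
```
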
2 changes: 0 additions & 2 deletions analogvnn/graph/AccumulateGrad.py
@@ -36,7 +36,6 @@ def __repr__(self):
Returns:
str: String representation of the module.
"""
# return f"AccumulateGrad"
return f"AccumulateGrad({self.module})"

def __call__(
@@ -62,7 +61,6 @@ def __call__(
forward_in_arg: Union[None, int, bool] = location['out_arg']
forward_in_kwarg: Union[None, str, bool] = location['out_kwarg']
predecessor: GRAPH_NODE_TYPE = location['from']
# print(out_kwarg, out_arg, value)

# 0 - not allowed

11 changes: 0 additions & 11 deletions analogvnn/graph/AcyclicDirectedGraph.py
@@ -423,17 +423,6 @@ def parse_args_kwargs(
)
return inputs

# @staticmethod
# def print_inputs_outputs(input_output_graph, module):
# if len(input_output_graph[module].inputs.args) > 0:
# print(f"{module} :i: {input_output_graph[module].inputs.args}")
# if len(input_output_graph[module].inputs.kwargs.keys()) > 0:
# print(f"{module} :i: {input_output_graph[module].inputs.kwargs}")
# if len(input_output_graph[module].outputs.args) > 0:
# print(f"{module} :o: {input_output_graph[module].outputs.args}")
# if len(input_output_graph[module].outputs.kwargs.keys()) > 0:
# print(f"{module} :o: {input_output_graph[module].outputs.kwargs}")

def render(self, *args, real_label: bool = False, **kwargs) -> str:
"""Save the source to file and render with the Graphviz engine.
9 changes: 0 additions & 9 deletions analogvnn/graph/BackwardGraph.py
@@ -153,14 +153,12 @@ def from_forward(self, forward_graph: Union[AcyclicDirectedGraph, nx.DiGraph]) -
"in_kwarg": True,
"out_arg": True,
"out_kwarg": True,
# "label": f"* -> *",
"len": 0,
})

for v in graph.nodes():
new_graph.nodes[v]["fillcolor"] = "lightblue"
self.graph = new_graph
# self.graph = graph
return self

@torch.no_grad()
@@ -214,16 +212,13 @@ def _pass(self, from_node: GRAPH_NODE_TYPE, *args, **kwargs) -> Dict[GRAPH_NODE_

if isinstance(module, GraphEnum):
input_output_graph[module].outputs = input_output_graph[module].inputs
# print()
# self.print_inputs_outputs(input_output_graph, module)
continue

outputs = self._calculate_gradients(
module,
input_output_graph[module]
)
input_output_graph[module].outputs = ArgsKwargs.to_args_kwargs_object(outputs)
# self.print_inputs_outputs(input_output_graph, module)

return input_output_graph

@@ -293,10 +288,6 @@ def _calculate_gradients(
retain_graph=True,
allow_unused=True
)
# print()
# print(f"inputs: {inputs}")
# print(f"outputs: {outputs}")
# print(f"grad_outputs: {outputs_grads}")
for i, v in enumerate(out_grads):
grad_dict[inputs[i]] = v

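
As a reference for the `torch.autograd.grad` call visible in `_calculate_gradients` above, a self-contained sketch (hypothetical tensors, not the graph code) of what `retain_graph=True` and `allow_unused=True` do:

```python
import torch

x = torch.tensor(2.0, requires_grad=True)
unused = torch.tensor(1.0, requires_grad=True)
y = x * x  # `unused` plays no part in y

# allow_unused=True returns None for inputs the outputs do not depend on,
# which is why the caller maps gradients back to inputs by position.
grads = torch.autograd.grad(
    outputs=y,
    inputs=[x, unused],
    grad_outputs=torch.tensor(1.0),
    retain_graph=True,  # keep the graph alive so grad can be called again
    allow_unused=True,
)
print(grads)  # (tensor(4.), None)

# Because retain_graph=True was used, a second call on the same graph still works.
print(torch.autograd.grad(outputs=y, inputs=[x]))  # (tensor(4.),)
```
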
2 changes: 0 additions & 2 deletions analogvnn/graph/ForwardGraph.py
@@ -104,15 +104,13 @@ def _pass(self, from_node: GraphEnum, *inputs: Tensor) -> Dict[GraphEnum, InputO

if isinstance(module, GraphEnum):
input_output_graph[module].outputs = input_output_graph[module].inputs
# self.print_inputs_outputs(input_output_graph, module)
continue

outputs = module(
*input_output_graph[module].inputs.args,
**input_output_graph[module].inputs.kwargs
)
input_output_graph[module].outputs = ArgsKwargs.to_args_kwargs_object(outputs)
# self.print_inputs_outputs(input_output_graph, module)

return input_output_graph

120 changes: 0 additions & 120 deletions analogvnn/graph/ModelGraph.py
@@ -1,12 +1,8 @@
from __future__ import annotations

import torch
from torch import nn

from analogvnn.graph.BackwardGraph import BackwardGraph
from analogvnn.graph.ForwardGraph import ForwardGraph
from analogvnn.graph.ModelGraphState import ModelGraphState
from analogvnn.utils.make_dot import make_dot

__all__ = ['ModelGraph']

@@ -50,119 +46,3 @@ def compile(self, is_static: bool = True, auto_backward_graph: bool = False) ->
self.backward_graph.compile(is_static=is_static)
return self


if __name__ == '__main__':
mg = ModelGraph()
# l1 = torch.analogvnn.Linear(1, 1, bias=False)
l1 = nn.Linear(1, 1, bias=False)
l1.weight.data = torch.ones_like(l1.weight.data) * 2


def l2(*x):
return torch.add(*x), torch.sub(*x)


def l3(x, y):
return {"a": torch.sub(x, y), "b": torch.add(x, y)}


def l4(x, y, z, a, b):
return ((x + y) + (a + b)) + z


def l5(x):
return {"c": x * 0.5}


# l1 :: 1 -> 2
# l2 :: (1, 2) -> (3, -1)
# l3 :: (2, 3) -> {a: -1, b: 5}
# l4 :: (-1, 5, 2, 3, 1) -> 10
# l5 :: 10 -> {c: 5}

# l5 :: {c: 1} -> 0.5
# l4 :: 0.5 -> (0.5, 0.5, 0.5, 0.5, 0.5)
# l3 :: (0.5, 0.5) -> (1, 0)
# l2 :: (0.5, 0.5) -> (1, 0)
# l1 :: 0.5 + 1 + 0 -> 3
mg.forward_graph.add_edge(mg.INPUT, l1, in_arg=0)
mg.forward_graph.add_edge(mg.INPUT, l2)
mg.forward_graph.add_edge(l1, l2, out_arg=1)
mg.forward_graph.add_edge(l1, l3, out_arg=0)
mg.forward_graph.add_edge(l1, l3, out_arg=0)
mg.forward_graph.add_edge(l2, l3, in_arg=1, out_arg=1)
mg.forward_graph.add_edge(l2, l3, in_arg=0, out_arg=1)
mg.forward_graph.add_edge(l3, l4, in_kwarg=True, out_arg=True)
# mg.forward_graph.add_edge(l3, l4, in_kwarg="b", out_kwarg="y")
mg.forward_graph.add_edge(l1, l4, out_kwarg="z")
mg.forward_graph.add_edge(l2, l4, out_kwarg="a")
mg.forward_graph.add_edge(l2, l4, in_arg=1, out_kwarg="b")
mg.forward_graph.add_edge(l4, l5)
mg.forward_graph.add_edge(l5, mg.OUTPUT, in_kwarg="c", out_arg=0)

mg.compile(is_static=True, auto_backward_graph=True)
mg.forward_graph.render("../../_data/forward", real_label=True)
mg.backward_graph.render("../../_data/backward")

print()
print("Starting Forward Pass ::")
output = mg.forward_graph.calculate(torch.ones((1, 1), dtype=torch.float))
print(f"output = {output}")

print()
print("Grads ::")
mg.use_autograd_graph = True
inputs = torch.ones((1, 1), dtype=torch.float, requires_grad=True)
output = mg.forward_graph.calculate(inputs)
make_dot(output, params={
"input": inputs,
"output": output,
"l1.weight": l1.weight,
}).render("../../_data/model_graph", format="svg", cleanup=True)
for k in reversed(list(mg.forward_input_output_graph)):

output = mg.forward_graph.calculate(torch.ones((1, 1), dtype=torch.float, requires_grad=True))
v = mg.forward_input_output_graph[k]
print(f"{k} :o: ", end="")
if len(v.outputs.args) > 0:
grad = torch.autograd.grad(outputs=output, grad_outputs=torch.ones((1, 1), dtype=torch.float),
inputs=v.outputs.args)
print(f"{grad}, ", end="")

output = mg.forward_graph.calculate(torch.ones((1, 1), dtype=torch.float, requires_grad=True))
v = mg.forward_input_output_graph[k]
if len(v.outputs.kwargs.keys()) > 0:
grad = {vk: vv for vk, vv in zip(
list(v.outputs.kwargs.keys()),
torch.autograd.grad(outputs=output, grad_outputs=torch.ones((1, 1), dtype=torch.float),
inputs=list(v.outputs.kwargs.values()))
)}
print(f"{grad}, ", end="")

print()

output = mg.forward_graph.calculate(torch.ones((1, 1), dtype=torch.float, requires_grad=True))
v = mg.forward_input_output_graph[k]
print(f"{k} :i: ", end="")
if len(v.inputs.args) > 0:
grad = torch.autograd.grad(outputs=output, grad_outputs=torch.ones((1, 1), dtype=torch.float),
inputs=v.inputs.args)
print(f"{grad}, ", end="")

output = mg.forward_graph.calculate(torch.ones((1, 1), dtype=torch.float, requires_grad=True))
v = mg.forward_input_output_graph[k]
if len(v.inputs.kwargs.keys()) > 0:
grad = {vk: vv for vk, vv in zip(
list(v.inputs.kwargs.keys()),
torch.autograd.grad(outputs=output, grad_outputs=torch.ones((1, 1), dtype=torch.float),
inputs=list(v.inputs.kwargs.values()))
)}
print(f"{grad}, ", end="")

print()

print()
print("Starting Backward Pass ::")
mg.use_autograd_graph = False
output = mg.forward_graph.calculate(torch.ones((1, 1), dtype=torch.float))
print(mg.backward_graph.calculate(torch.ones((1, 1), dtype=torch.float)))
4 changes: 3 additions & 1 deletion analogvnn/graph/ModelGraphState.py
@@ -15,7 +15,8 @@ class ModelGraphState:
Attributes:
allow_loops (bool): if True, the graph is allowed to contain loops.
forward_input_output_graph (Optional[Dict[GRAPH_NODE_TYPE, InputOutput]]): the input and output of the forward pass.
forward_input_output_graph (Optional[Dict[GRAPH_NODE_TYPE, InputOutput]]): the input and output of the
forward pass.
use_autograd_graph (bool): if True, the autograd graph is used to calculate the gradients.
_loss (Tensor): the loss.
INPUT (GraphEnum): GraphEnum.INPUT
@@ -49,6 +50,7 @@ def __init__(self, use_autograd_graph: bool = False, allow_loops=False):
self.forward_input_output_graph = None
self._loss = None

# noinspection PyUnusedLocal
@staticmethod
def ready_for_forward(exception: bool = False) -> bool:
"""Check if the state is ready for forward pass.
2 changes: 2 additions & 0 deletions analogvnn/nn/module/Layer.py
@@ -26,6 +26,7 @@ def __nn_Module_init_updated__(function: Callable) -> Callable:
Callable: Wrapped function
"""

# noinspection PyUnusedLocal
def _temp(*args, **kwargs) -> ...:
pass

@@ -192,6 +193,7 @@ def _forward_wrapper(self, function: Callable) -> Callable:
Returns:
Callable: Wrapped function.
"""
# noinspection PyUnresolvedReferences
if hasattr(function, "__wrapper__") and function.__wrapper__ == Layer._forward_wrapper:
return function

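
The `hasattr(function, "__wrapper__")` guard in `_forward_wrapper` above is the usual way to make a wrapper idempotent, so a forward method is never wrapped twice. A generic sketch of the pattern (illustrative names only, not the Layer API):

```python
import functools


def forward_wrapper(function):
    # If this exact wrapper already wrapped the function, return it unchanged.
    if getattr(function, '__wrapper__', None) is forward_wrapper:
        return function

    @functools.wraps(function)
    def wrapped(*args, **kwargs):
        # pre-/post-processing hooks would go here
        return function(*args, **kwargs)

    wrapped.__wrapper__ = forward_wrapper
    return wrapped


def forward(x):
    return x + 1


once = forward_wrapper(forward)
assert forward_wrapper(once) is once  # wrapping again is a no-op
```
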
1 change: 0 additions & 1 deletion analogvnn/nn/noise/LaplacianNoise.py
@@ -94,7 +94,6 @@ def calc_leakage(scale: TENSOR_OPERABLE, precision: TENSOR_OPERABLE) -> Tensor:
Returns:
float: the leakage of the Laplacian noise.
"""
# return math.exp((-1 / (2 * precision)) * (1 / scale))
return 2 * LaplacianNoise.static_cdf(x=-1 / (2 * precision), scale=scale)

@property
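
For reference on the hunk above: the removed inline formula and the retained `static_cdf` form agree, since a zero-mean Laplace distribution has CDF(x) = 0.5·exp(x/scale) for x ≤ 0, so 2·CDF(−1/(2·precision)) = exp(−1/(2·precision·scale)). A quick standalone check (illustration only, not the library code):

```python
import math


def laplace_cdf(x: float, scale: float) -> float:
    # CDF of a zero-mean Laplace distribution.
    if x <= 0:
        return 0.5 * math.exp(x / scale)
    return 1.0 - 0.5 * math.exp(-x / scale)


scale, precision = 0.1, 8.0
via_cdf = 2 * laplace_cdf(-1 / (2 * precision), scale)
closed_form = math.exp(-1 / (2 * precision * scale))
print(via_cdf, closed_form)  # both ~0.5353
```
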
