Skip to content

Commit

Permalink
add aesara ops; move np to approxmath.np
Browse files Browse the repository at this point in the history
  • Loading branch information
brendanashworth committed Dec 27, 2021
1 parent 6d28d3c commit e082b24
Show file tree
Hide file tree
Showing 9 changed files with 247 additions and 35 deletions.
3 changes: 2 additions & 1 deletion .gitignore
Expand Up @@ -2,4 +2,5 @@ build
*.so
__pycache__
dist
MANIFEST
MANIFEST
*.egg-info
18 changes: 17 additions & 1 deletion README.md
Expand Up @@ -19,7 +19,7 @@ If your program doesn't depend on the full precision of floating point operation

Benchmarks were run on a 2019 MacBook Pro (1.4 GHz Quad-Core Intel Core i5), with Python 3.9.9 and NumPy 1.21.4.

### Drop-in Replacement
### Drop-in Replacement for NumPy

```python
import approxmath.np as npa
Expand All @@ -38,6 +38,22 @@ npa.exp(np.array([-1., 0., 1.]))
# array([0.36787944, 1. , 2.71828183])
```

### Drop-in Replacement for Aesara Tensor

```python
import approxmath.aesara as att
import aesara.tensor as tt
```

Functional equivalents:

| Aesara | Approxmath | Relative Speed-Up |
|--------|------------|-------------------|
| `tt.exp` | `att.exp` | 15x |
| `tt.log` | `att.log` | 15x |
| `tt.cos` | `att.cos` | 13x |
| `tt.sin` | `att.sin` | 13x |

### Installation

```sh
Expand Down
1 change: 1 addition & 0 deletions requirements.txt
Expand Up @@ -2,3 +2,4 @@ numpy==1.21.4
pytest==6.2.4
pytest-benchmark==3.4.1
pypandoc==1.7.2
aesara==2.3.3
2 changes: 1 addition & 1 deletion setup.py
Expand Up @@ -10,7 +10,7 @@
long_description = open('README.md').read()

setup(name="approxmath",
version="1.0.1",
version="2.0.0",
description="Fast approximate math functions: log, exp, sin, cos",
author="Brendan Ashworth",
author_email="brendan.ashworth@me.com",
Expand Down
18 changes: 17 additions & 1 deletion src/approxmath/aesara/__init__.py
@@ -1 +1,17 @@
from .approx_log_op import ApproxLogOp
from .basic import ApproxLogOp, ApproxExpOp
from .trig import ApproxCosOp, ApproxSinOp
import aesara.tensor as tt
import aesara


def _compiled(op_instance):
    """Compile a single-input elementwise Op into a callable on dmatrices.

    Returns the symbolic input variable as well, so the module keeps
    exporting the historical ``x_exp``/``x_log``/... names.
    """
    x = tt.matrix()
    return x, aesara.function([x], op_instance(x))


# initialize and export compiled ops (was four copy-pasted stanzas)
x_exp, exp = _compiled(ApproxExpOp())
x_log, log = _compiled(ApproxLogOp())
x_cos, cos = _compiled(ApproxCosOp())
x_sin, sin = _compiled(ApproxSinOp())
31 changes: 0 additions & 31 deletions src/approxmath/aesara/approx_log_op.py

This file was deleted.

59 changes: 59 additions & 0 deletions src/approxmath/aesara/basic.py
@@ -0,0 +1,59 @@
import aesara
from aesara.graph.op import Op
from aesara.graph.basic import Apply
import aesara.tensor as tt
import approxmath.np as npa

class ApproxExpOp(Op):
    """Aesara Op computing elementwise exp via approxmath's fast approximation.

    Input and output are single float64 matrices (dmatrix).
    """
    __props__ = ()

    # One float64 matrix in, one float64 matrix out.
    itypes = [aesara.tensor.dmatrix]
    otypes = [aesara.tensor.dmatrix]

    def perform(self, node, inputs, output_storage):
        # Numeric evaluation: delegate to the approximate NumPy kernel.
        x = inputs[0]
        z = output_storage[0]
        z[0] = npa.exp(x)

    def infer_shape(self, fgraph, node, i0_shapes):
        # Elementwise op: output shape equals input shape.
        return i0_shapes

    def grad(self, inputs, output_grads):
        # d/dx (exp x) => exp x   (previous comment said "=> x", which was wrong)
        # NOTE(review): the gradient uses exact tt.exp rather than the
        # approximate op, so backprop runs at full precision.
        return [output_grads[0] * tt.exp(inputs[0])]

    def R_op(self, inputs, eval_points):
        # R_op can receive None as an eval point, meaning there is no
        # differentiable path through that input. If that implies some
        # outputs cannot be computed, return None for those.
        if eval_points[0] is None:
            return eval_points
        return self.grad(inputs, eval_points)

class ApproxLogOp(Op):
    """Elementwise natural log computed with approxmath's fast approximation."""

    __props__ = ()

    # One float64 matrix in, one float64 matrix out.
    itypes = [aesara.tensor.dmatrix]
    otypes = [aesara.tensor.dmatrix]

    def perform(self, node, inputs, output_storage):
        # Numeric evaluation: delegate to the approximate NumPy kernel.
        (inp,) = inputs
        output_storage[0][0] = npa.log(inp)

    def infer_shape(self, fgraph, node, i0_shapes):
        # Elementwise: shapes pass through unchanged.
        return i0_shapes

    def grad(self, inputs, output_grads):
        # d/dx ln(x) = 1/x, chained with the incoming gradient.
        return [output_grads[0] / inputs[0]]

    def R_op(self, inputs, eval_points):
        # A None eval point means there is no differentiable path through
        # that input; propagate the None in that case.
        if eval_points[0] is None:
            return eval_points
        return self.grad(inputs, eval_points)
59 changes: 59 additions & 0 deletions src/approxmath/aesara/trig.py
@@ -0,0 +1,59 @@
import aesara
from aesara.graph.op import Op
from aesara.graph.basic import Apply
import aesara.tensor as tt
import approxmath.np as npa

class ApproxSinOp(Op):
    """Elementwise sine computed with approxmath's fast approximation."""

    __props__ = ()

    # One float64 matrix in, one float64 matrix out.
    itypes = [aesara.tensor.dmatrix]
    otypes = [aesara.tensor.dmatrix]

    def perform(self, node, inputs, output_storage):
        # Numeric evaluation: delegate to the approximate NumPy kernel.
        (inp,) = inputs
        output_storage[0][0] = npa.sin(inp)

    def infer_shape(self, fgraph, node, i0_shapes):
        # Elementwise: shapes pass through unchanged.
        return i0_shapes

    def grad(self, inputs, output_grads):
        # d/dx sin(x) = cos(x), chained with the incoming gradient.
        return [tt.cos(inputs[0]) * output_grads[0]]

    def R_op(self, inputs, eval_points):
        # A None eval point means there is no differentiable path through
        # that input; propagate the None in that case.
        if eval_points[0] is None:
            return eval_points
        return self.grad(inputs, eval_points)

class ApproxCosOp(Op):
    """Aesara Op computing elementwise cosine via approxmath's fast approximation.

    Input and output are single float64 matrices (dmatrix).
    """
    __props__ = ()

    # One float64 matrix in, one float64 matrix out.
    itypes = [aesara.tensor.dmatrix]
    otypes = [aesara.tensor.dmatrix]

    def perform(self, node, inputs, output_storage):
        # Numeric evaluation: delegate to the approximate NumPy kernel.
        x = inputs[0]
        z = output_storage[0]
        z[0] = npa.cos(x)

    def infer_shape(self, fgraph, node, i0_shapes):
        # Elementwise op: output shape equals input shape.
        return i0_shapes

    def grad(self, inputs, output_grads):
        # d/dx (cos x) => -sin x; unary negation instead of `-1 *`.
        # NOTE(review): the gradient uses exact tt.sin rather than the
        # approximate op, so backprop runs at full precision.
        return [-tt.sin(inputs[0]) * output_grads[0]]

    def R_op(self, inputs, eval_points):
        # R_op can receive None as an eval point, meaning there is no
        # differentiable path through that input. If that implies some
        # outputs cannot be computed, return None for those.
        if eval_points[0] is None:
            return eval_points
        return self.grad(inputs, eval_points)
91 changes: 91 additions & 0 deletions test_approxmath.py
@@ -1,4 +1,7 @@
import approxmath.np as npa
import approxmath.aesara as att
import aesara.tensor as tt
import aesara
import math
import numpy as np
import pytest
Expand All @@ -12,6 +15,94 @@ def test_battery():
assert np.all(np.isclose(npa.sin(xs), np.sin(xs)))
assert np.all(np.isclose(npa.cos(xs), np.cos(xs)))

# Shared random input for the aesara correctness tests below; rand() yields
# values in (0, 1), so log is well-defined.
x_val = np.random.rand(25, 30)

def test_aesara_log_op():
    """ApproxLogOp output matches exact np.log on a random positive matrix."""
    sym = tt.matrix()
    fn = aesara.function([sym], att.ApproxLogOp()(sym))
    assert np.allclose(np.log(x_val), fn(x_val))

def test_aesara_log():
    """The pre-compiled att.log wrapper matches exact np.log."""
    assert np.allclose(np.log(x_val), att.log(x_val))

def test_aesara_exp_op():
    """ApproxExpOp output matches exact np.exp on a random matrix."""
    sym = tt.matrix()
    fn = aesara.function([sym], att.ApproxExpOp()(sym))
    assert np.allclose(np.exp(x_val), fn(x_val))

def test_aesara_exp():
    """The pre-compiled att.exp wrapper matches exact np.exp."""
    assert np.allclose(np.exp(x_val), att.exp(x_val))

def test_aesara_sin_op():
    """ApproxSinOp output matches exact np.sin on a random matrix."""
    sym = tt.matrix()
    fn = aesara.function([sym], att.ApproxSinOp()(sym))
    assert np.allclose(np.sin(x_val), fn(x_val))

def test_aesara_sin():
    """The pre-compiled att.sin wrapper matches exact np.sin."""
    assert np.allclose(np.sin(x_val), att.sin(x_val))

def test_aesara_cos_op():
    """ApproxCosOp output matches exact np.cos on a random matrix."""
    sym = tt.matrix()
    fn = aesara.function([sym], att.ApproxCosOp()(sym))
    assert np.allclose(np.cos(x_val), fn(x_val))

def test_aesara_cos():
    """The pre-compiled att.cos wrapper matches exact np.cos."""
    assert np.allclose(np.cos(x_val), att.cos(x_val))

def _check_grad(op):
    # Numerically verify the op's symbolic grad() against finite differences.
    aesara.gradient.verify_grad(op, [x_val], rng=np.random.RandomState())

def test_aesara_exp_grad():
    _check_grad(att.ApproxExpOp())

def test_aesara_log_grad():
    _check_grad(att.ApproxLogOp())

def test_aesara_sin_grad():
    _check_grad(att.ApproxSinOp())

def test_aesara_cos_grad():
    _check_grad(att.ApproxCosOp())

# Larger random input used only by the benchmark tests below.
x_val_bench = np.random.rand(50, 50)

# Benchmark pairs: approxmath's compiled wrapper (att.*) vs the aesara.tensor
# builtin (tt.*), grouped so pytest-benchmark reports them side by side.
# NOTE(review): att.* are compiled aesara functions evaluated on an ndarray,
# while tt.* called on a raw ndarray may only build a symbolic expression
# rather than evaluate it — confirm the comparison is apples-to-apples.

@pytest.mark.benchmark(group="aesara_log")
def test_att_log_bench(benchmark):
    # approxmath's compiled log wrapper
    benchmark(att.log, x_val_bench)

@pytest.mark.benchmark(group="aesara_log")
def test_tt_log_bench(benchmark):
    # baseline: aesara.tensor log
    benchmark(tt.log, x_val_bench)

@pytest.mark.benchmark(group="aesara_exp")
def test_att_exp_bench(benchmark):
    # approxmath's compiled exp wrapper
    benchmark(att.exp, x_val_bench)

@pytest.mark.benchmark(group="aesara_exp")
def test_tt_exp_bench(benchmark):
    # baseline: aesara.tensor exp
    benchmark(tt.exp, x_val_bench)

@pytest.mark.benchmark(group="aesara_sin")
def test_att_sin_bench(benchmark):
    # approxmath's compiled sin wrapper
    benchmark(att.sin, x_val_bench)

@pytest.mark.benchmark(group="aesara_sin")
def test_tt_sin_bench(benchmark):
    # baseline: aesara.tensor sin
    benchmark(tt.sin, x_val_bench)

@pytest.mark.benchmark(group="aesara_cos")
def test_att_cos_bench(benchmark):
    # approxmath's compiled cos wrapper
    benchmark(att.cos, x_val_bench)

@pytest.mark.benchmark(group="aesara_cos")
def test_tt_cos_bench(benchmark):
    # baseline: aesara.tensor cos
    benchmark(tt.cos, x_val_bench)

@pytest.mark.benchmark(group="log")
def test_fast_log_bench(benchmark):
    # Benchmarks npa.log over `posxs` — presumably positive inputs defined
    # earlier in the file (not visible in this diff); verify against source.
    benchmark(npa.log, posxs)
Expand Down

0 comments on commit e082b24

Please sign in to comment.