Revise tests (#116)
Utilize complex-step numerical derivative in testing framework; improve strats

* fix broadcast-shape strat to respect min_dim=0; add tests

* add value check to choices strat; add unit test

* improve integer-index strat; add test

* improve slice strat: permit negative start/stop; add unit test

* improve basic indexing; tests passing; shrinking is weird

* fix shrinking behavior of basic_indexing; add docs

* improve shrinking for slices; add docs

* improve adv-index search strategy to shrink away from singleton dimensions

* begin porting numerical derivatives to complex step (see the sketch after this list)

* remove as_decimal from uber calls

* minor cleanup; some tests still failing - need finite difference

* docs formatting

* fix slice strat: step was never negative; permit None for start/stop

* fix tests for funcs not compatible with complex-step

* replace empty_like with empty in uber

* fix valid_axes and add tests - bug exposed in Sum/Mean

* fix edge case for mean when axis=()

* add StdDev (handles axis=() case for backprop)

* remove f-string

* restore rtol for cbrt test

* update hypothesis dependency to disallow non-array return from hnp.arrays

* update .gitignore

* remove np.empty from tests - guard against nan

* add complex step docs
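
For context: the complex-step method evaluates f at the complex point x + ih and reads the derivative off the imaginary part, f'(x) ≈ Im(f(x + ih)) / h. Unlike a finite difference there is no subtraction of nearly equal values, so h can be made tiny without round-off loss. A minimal sketch in plain NumPy (the helper name complex_step is illustrative, not MyGrad's actual test utility):

import numpy as np

def complex_step(f, x, h=1e-20):
    # f'(x) ~ Im(f(x + i*h)) / h; there is no subtractive cancellation,
    # so h can be far smaller than any finite-difference step
    return np.imag(f(x + 1j * h)) / h

print(complex_step(np.sin, 0.5))  # 0.8775825618903728, i.e. np.cos(0.5)

This only applies to functions that stay analytic under complex inputs, which is why the commit keeps a finite-difference path for the incompatible ones (see "fix tests for funcs not compatible with complex-step" above).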
rsokl committed Mar 3, 2019
1 parent 24785af commit eb514c5
Showing 26 changed files with 912 additions and 361 deletions.
5 changes: 5 additions & 0 deletions .gitignore
@@ -1,6 +1,11 @@
*.pyc
*.idea
*.egg-info
.tox/*
.cache/*
.coverage
dev_notebooks/*
venv/*
.hypothesis/*
**/.hypothesis/*
*.ipynb
6 changes: 5 additions & 1 deletion mygrad/math/sequential/funcs.py
@@ -1,6 +1,8 @@
from .ops import *
from mygrad.tensor_base import Tensor
from mygrad.math.misc.funcs import sqrt
from mygrad.math.arithmetic.funcs import multiply
from collections.abc import Sequence

__all__ = ["sum",
"mean",
@@ -308,7 +310,9 @@ def std(x, axis=None, ddof=0, keepdims=False, constant=False):
    >>> mg.std(a, dtype=np.float64)
    Tensor(0.44999999925494177)
    """
-    return sqrt(var(x, axis=axis, keepdims=keepdims, ddof=ddof, constant=constant))
+    return Tensor._op(StdDev, x, op_kwargs=dict(axis=axis,
+                                                keepdims=keepdims,
+                                                ddof=ddof), constant=constant)


def max(x, axis=None, keepdims=False, constant=False):
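A quick sanity check of the new code path, assuming MyGrad's usual Tensor.backward()/Tensor.grad interface:

import mygrad as mg

x = mg.Tensor([1.0, 2.0, 3.0])
out = mg.std(x)   # population std-dev (ddof=0): sqrt(2/3)
out.backward()    # d(std)/dx_i = (x_i - mean) / (N * std)
print(x.grad)     # [-0.40824829  0.          0.40824829]

Routing through a dedicated StdDev op, rather than composing sqrt(var(...)), also lets the backward pass special-case axis=() directly (see the new op below).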
38 changes: 36 additions & 2 deletions mygrad/math/sequential/ops.py
@@ -1,9 +1,10 @@
from mygrad.operation_base import Operation
import numpy as np
from functools import reduce
from collections.abc import Sequence

__all__ = ["MaxMin", "Sum", "Mean", "Prod", "CumProd", "CumSum",
"Variance"]
"Variance", "StdDev"]


class MaxMin(Operation):
@@ -153,7 +154,7 @@ def backward_var(self, grad, index, **kwargs):
class Mean(Sum):
    def __call__(self, a, axis=None, keepdims=False):
        out = super(Mean, self).__call__(a, axis, keepdims)
-        self.n = a.data.size if not self.axis else np.prod([a.shape[i] for i in self.axis])
+        self.n = a.data.size if self.axis is None else np.prod([a.shape[i] for i in self.axis])
        return out / self.n

    def backward_var(self, grad, index, **kwargs):
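
The old guard, not self.axis, misfires when axis=(): an empty tuple is falsey, so n silently fell back to the full array size, even though NumPy treats an empty axis tuple as "reduce over no axes" (a reduction over one element each). A small illustration in plain NumPy:

import numpy as np

a = np.arange(6.0).reshape(2, 3)
print(np.mean(a, axis=()))                # reduces over no axes: returns a copy of `a`
print(np.prod([a.shape[i] for i in ()]))  # 1.0, the element count the fixed line computes
# old code: `not ()` is True, so n was a.size == 6 and the mean came out wrong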
@@ -382,3 +383,36 @@ def backward_var(self, grad, index, **kwargs):
        back = (2. / N) * (a.data - a.data.mean(axis=self.kwargs["axis"], keepdims=True))
        return back * grad


class StdDev(Operation):
    def __call__(self, a, axis=None, keepdims=False, ddof=0):
        """ Parameters
            ----------
            a : mygrad.Tensor"""
        self.variables = (a,)

        if axis is not None and not hasattr(axis, "__iter__"):
            axis = (axis,)

        self.kwargs = dict(axis=axis, keepdims=keepdims, ddof=ddof)
        return a.data.std(**self.kwargs)

    def backward_var(self, grad, index, **kwargs):
        a = self.variables[index]
        if isinstance(self.kwargs["axis"], Sequence) and not self.kwargs["axis"]:
            return np.zeros(a.shape, dtype=float)

        N = a.size if self.kwargs["axis"] is None else np.prod([a.shape[i] for i in self.kwargs["axis"]])
        N -= self.kwargs["ddof"]

        grad = np.asarray(grad) / (2 * np.sqrt(a.data.var(**self.kwargs)))
        if grad.ndim == 0:
            grad = np.full(a.shape, grad, dtype=float)
        else:
            if not self.kwargs["keepdims"]:
                index = [slice(None)] * a.ndim
                for i in self.kwargs["axis"]:
                    index[i] = np.newaxis
                grad = grad[tuple(index)]
        back = (2. / N) * (a.data - a.data.mean(axis=self.kwargs["axis"], keepdims=True))
        return back * grad
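
For grad = 1 and ddof = 0 the expression above collapses to the textbook result d(sigma)/d(a_i) = (a_i - mu) / (N * sigma): the grad / (2 * sqrt(var)) factor times (2/N) * (a - mean). A hedged numerical check against the complex-step method; the std-dev has to be rewritten with analytic operations here, since NumPy's own std takes absolute values on complex input, which breaks analyticity:

import numpy as np

def std_analytic(x):
    # population std-dev via analytic ops only (no abs), so it
    # extends correctly to complex inputs for complex-step testing
    mu = x.mean()
    return np.sqrt(((x - mu) ** 2).mean())

a = np.random.rand(5)
expected = (a - a.mean()) / (a.size * a.std())  # StdDev.backward_var with grad=1, ddof=0

h = 1e-20
numeric = np.empty(a.size)
for i in range(a.size):
    z = a.astype(complex)
    z[i] += 1j * h                    # perturb one entry along the imaginary axis
    numeric[i] = std_analytic(z).imag / h

assert np.allclose(expected, numeric)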
2 changes: 1 addition & 1 deletion setup.py
@@ -21,7 +21,7 @@
]

INSTALL_REQUIRES = ['numpy >= 1.12']
-TESTS_REQUIRE = ['pytest >= 3.8', 'hypothesis >= 4.0', 'scipy']
+TESTS_REQUIRE = ['pytest >= 3.8', 'hypothesis >= 4.6', 'scipy']

DESCRIPTION = "A sleek auto-differentiation library that wraps numpy."
LONG_DESCRIPTION = """
