diff --git a/conda_recipe/run_test.py b/conda_recipe/run_test.py
index 71bde75..352438c 100644
--- a/conda_recipe/run_test.py
+++ b/conda_recipe/run_test.py
@@ -1,3 +1,3 @@
import numdifftools

-numdifftools.test()
\ No newline at end of file
+numdifftools.test()
diff --git a/numdifftools/info.py b/numdifftools/info.py
index 034286e..dc52594 100644
--- a/numdifftools/info.py
+++ b/numdifftools/info.py
@@ -152,16 +152,19 @@
===============

The `numdifftools package <http://pypi.python.org/pypi/numdifftools/>`_ for
`Python <https://www.python.org/>`_ was written by Per A. Brodtkorb
-based on the adaptive numerical differentiation toolbox written in `Matlab <http://www.mathworks.com>`_ by John D'Errico [DErrico2006]_.
+based on the adaptive numerical differentiation toolbox written in
+`Matlab <http://www.mathworks.com>`_ by John D'Errico [DErrico2006]_.
Numdifftools has as of version 0.9 been extended with some of the functionality
-found in the statsmodels.tools.numdiff module written by Josef Perktold [Perktold2014]_.
+found in the statsmodels.tools.numdiff module written by Josef Perktold
+[Perktold2014]_.


References
===========

-.. [DErrico2006] D'Errico, J. R. (2006), Adaptive Robust Numerical Differentiation
+.. [DErrico2006] D'Errico, J. R. (2006),
+ Adaptive Robust Numerical Differentiation
http://www.mathworks.com/matlabcentral/fileexchange/13490-adaptive-robust-numerical-differentiation

.. [Perktold2014] Perktold, J (2014), numdiff package
diff --git a/numdifftools/nd_algopy.py b/numdifftools/nd_algopy.py
index b6d4e6a..43f267b 100644
--- a/numdifftools/nd_algopy.py
+++ b/numdifftools/nd_algopy.py
@@ -211,67 +211,6 @@ def _reverse(self, x, *args, **kwds):
return y.reshape(shape0)


-class Jacobian(_Derivative):
- __doc__ = _cmn_doc % dict(
- derivative='Jacobian',
- extra_parameter="",
- extra_note="", returns="""
- Returns
- -------
- jacob : array
- Jacobian
- """, example="""
- Example
- -------
- >>> import numdifftools.nd_algopy as nda
-
- #(nonlinear least squares)
-
- >>> xdata = np.reshape(np.arange(0,1,0.1),(-1,1))
- >>> ydata = 1+2*np.exp(0.75*xdata)
- >>> f = lambda c: (c[0]+c[1]*np.exp(c[2]*xdata) - ydata)**2
-
- Jfun = nda.Jacobian(f) # Todo: This does not work
- Jfun([1,2,0.75]).T # should be numerically zero
- array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
- [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
- [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
-
- >>> Jfun2 = nda.Jacobian(f, method='reverse')
- >>> Jfun2([1,2,0.75]).T # should be numerically zero
- array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
- [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
- [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
-
- >>> f2 = lambda x : x[0]*x[1]*x[2] + np.exp(x[0])*x[1]
- >>> Jfun3 = nda.Jacobian(f2)
- >>> Jfun3([3.,5.,7.])
- array([[ 135.42768462, 41.08553692, 15. ]])
-
- >>> Jfun4 = nda.Jacobian(f2, method='reverse')
- >>> Jfun4([3,5,7])
- array([[ 135.42768462, 41.08553692, 15. ]])
- """, see_also="""
- See also
- --------
- Derivative
- Gradient,
- Hessdiag,
- Hessian,
- """)
-
- def _forward(self, x, *args, **kwds):
- # forward mode without building the computational graph
- tmp = algopy.UTPM.init_jacobian(x)
- y = self.f(tmp, *args, **kwds)
- return np.atleast_2d(algopy.UTPM.extract_jacobian(y))
-
- def _reverse(self, x, *args, **kwds):
- x = np.atleast_1d(x)
- c_graph = self.computational_graph(x, *args, **kwds)
- return c_graph.jacobian(x)
-
-
class Gradient(_Derivative):
__doc__ = _cmn_doc % dict(
derivative='Gradient',
@@ -330,6 +269,64 @@ def _reverse(self, x, *args, **kwds):
return c_graph.gradient(x)


+class Jacobian(Gradient):
+ __doc__ = _cmn_doc % dict(
+ derivative='Jacobian',
+ extra_parameter="",
+ extra_note="", returns="""
+ Returns
+ -------
+ jacob : array
+ Jacobian
+ """, example="""
+ Example
+ -------
+ >>> import numdifftools.nd_algopy as nda
+
+ #(nonlinear least squares)
+
+ >>> xdata = np.reshape(np.arange(0,1,0.1),(-1,1))
+ >>> ydata = 1+2*np.exp(0.75*xdata)
+ >>> f = lambda c: (c[0]+c[1]*np.exp(c[2]*xdata) - ydata)**2
+
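+    The data are generated with c = [1, 2, 0.75], so at that point the
+    residuals, and hence the Jacobian of the squared residuals, vanish:
+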
+ Jfun = nda.Jacobian(f) # Todo: This does not work
+ Jfun([1,2,0.75]).T # should be numerically zero
+ array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
+
+ >>> Jfun2 = nda.Jacobian(f, method='reverse')
+ >>> Jfun2([1,2,0.75]).T # should be numerically zero
+ array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
+
+ >>> f2 = lambda x : x[0]*x[1]*x[2] + np.exp(x[0])*x[1]
+ >>> Jfun3 = nda.Jacobian(f2)
+ >>> Jfun3([3.,5.,7.])
+ array([[ 135.42768462, 41.08553692, 15. ]])
+
+ >>> Jfun4 = nda.Jacobian(f2, method='reverse')
+ >>> Jfun4([3,5,7])
+ array([[ 135.42768462, 41.08553692, 15. ]])
+ """, see_also="""
+ See also
+ --------
+ Derivative
+ Gradient,
+ Hessdiag,
+ Hessian,
+ """)
+
+ def _forward(self, x, *args, **kwds):
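+        # Forward mode is inherited from Gradient; the result is promoted
+        # to 2-D so that scalar-valued functions also return a row vector.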
+ return np.atleast_2d(super(Jacobian, self)._forward(x, *args, **kwds))
+
+ def _reverse(self, x, *args, **kwds):
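+        # Reverse mode: trace f once to build the computational graph,
+        # then evaluate the full Jacobian at x.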
+ x = np.atleast_1d(x)
+ c_graph = self.computational_graph(x, *args, **kwds)
+ return c_graph.jacobian(x)
+
+
class Hessian(_Derivative):
__doc__ = _cmn_doc % dict(
derivative='Hessian',
diff --git a/numdifftools/run_benchmark.py b/numdifftools/run_benchmark.py
index 12e23ec..e03b16d 100644
--- a/numdifftools/run_benchmark.py
+++ b/numdifftools/run_benchmark.py
@@ -85,22 +85,21 @@ def loglimits(data, border=0.05):
hessian_funs[method2] = ndc_hessian(1, method=method, step=epsilon)


-def compute_gradients(gradient_funs, problem_sizes):
-
- results_gradient_list = []
+def _compute_benchmark(functions, problem_sizes):
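+    # Shared driver for the gradient and hessian benchmarks: for each
+    # problem size, time every method and record run time, relative error
+    # and preprocessing time, measuring the error against the first
+    # method's result.  The tiny 1e-16 offset added to the results keeps
+    # zero entries plottable on log-scale axes.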
+ result_list = []
for n in problem_sizes:
print('n=', n)
- num_methods = len(gradient_funs)
- results_gradient = np.zeros((num_methods, 3))
+ num_methods = len(functions)
+ results = np.zeros((num_methods, 3))
ref_g = None
f = BenchmarkFunction(n)
- for i, (_key, gradient_f) in enumerate(gradient_funs.items()):
+ for i, (_key, function) in enumerate(functions.items()):
t = timeit.default_timer()
- gradient_f.f = f
+ function.f = f
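+            # the assignment above may trigger per-method setup,
+            # so it is timed separately as preprocessing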
preproc_time = timeit.default_timer() - t
t = timeit.default_timer()
x = 3 * np.ones(n)
- val = gradient_f(x)
+ val = function(x)
run_time = timeit.default_timer() - t
if ref_g is None:
ref_g = val
@@ -108,43 +107,22 @@ def compute_gradients(gradient_funs, problem_sizes):
norm_ref_g = np.linalg.norm(ref_g)
else:
err = np.linalg.norm(val - ref_g) / norm_ref_g
-            results_gradient[i] = run_time, err, preproc_time
+            results[i] = run_time, err, preproc_time

-        results_gradient_list.append(results_gradient)
+        result_list.append(results)

-    results_gradients = np.array(results_gradient_list) + 1e-16
+    return np.array(result_list) + 1e-16
+
+def compute_gradients(gradient_funs, problem_sizes):
+ print('starting gradient computation ')
+ results_gradients = _compute_benchmark(gradient_funs, problem_sizes)
print('results_gradients=\n', results_gradients)
return results_gradients


def compute_hessians(hessian_funs, problem_sizes):
print('starting hessian computation ')
- results_hessian_list = []
- for n in problem_sizes:
- print('n=', n)
- num_methods = len(hessian_funs)
- results_hessian = np.zeros((num_methods, 3))
- ref_h = None
- f = BenchmarkFunction(n)
- for i, (_key, hessian_f) in enumerate(hessian_funs.items()):
- t = timeit.default_timer()
- hessian_f.f = f
- preproc_time = timeit.default_timer() - t
- t = timeit.default_timer()
- x = 3 * np.ones(n)
- val = hessian_f(x)
- run_time = timeit.default_timer() - t
- if ref_h is None:
- ref_h = val
- err = 0.0
- norm_ref_h = np.linalg.norm(ref_h.ravel())
- else:
- err = np.linalg.norm((val - ref_h).ravel()) / norm_ref_h
- results_hessian[i] = run_time, err, preproc_time
-
- results_hessian_list.append(results_hessian)
-
- results_hessians = np.array(results_hessian_list) + 1e-16
+ results_hessians = _compute_benchmark(hessian_funs, problem_sizes)
print(problem_sizes)
print('results_hessians=\n', results_hessians)
return results_hessians