Removed duplicated code + pep8
pbrod committed Jul 21, 2016
1 parent 287f1c7 commit fb363c5
Showing 4 changed files with 80 additions and 102 deletions.
2 changes: 1 addition & 1 deletion conda_recipe/run_test.py
@@ -1,3 +1,3 @@
import numdifftools

numdifftools.test()
numdifftools.test()
9 changes: 6 additions & 3 deletions numdifftools/info.py
@@ -152,16 +152,19 @@
===============
The `numdifftools package <http://pypi.python.org/pypi/numdifftools/>`_ for
`Python <https://www.python.org/>`_ was written by Per A. Brodtkorb
based on the adaptive numerical differentiation toolbox written in `Matlab <http://www.mathworks.com>`_ by John D'Errico [DErrico2006]_.
based on the adaptive numerical differentiation toolbox written in
`Matlab <http://www.mathworks.com>`_ by John D'Errico [DErrico2006]_.
Numdifftools has as of version 0.9 been extended with some of the functionality
found in the statsmodels.tools.numdiff module written by Josef Perktold [Perktold2014]_.
found in the statsmodels.tools.numdiff module written by Josef Perktold
[Perktold2014]_.
References
===========
.. [DErrico2006] D'Errico, J. R. (2006), Adaptive Robust Numerical Differentiation
.. [DErrico2006] D'Errico, J. R. (2006),
Adaptive Robust Numerical Differentiation
http://www.mathworks.com/matlabcentral/fileexchange/13490-adaptive-robust-numerical-differentiation
.. [Perktold2014] Perktold, J (2014), numdiff package
119 changes: 58 additions & 61 deletions numdifftools/nd_algopy.py
@@ -211,67 +211,6 @@ def _reverse(self, x, *args, **kwds):
return y.reshape(shape0)


class Jacobian(_Derivative):
__doc__ = _cmn_doc % dict(
derivative='Jacobian',
extra_parameter="",
extra_note="", returns="""
Returns
-------
jacob : array
Jacobian
""", example="""
Example
-------
>>> import numdifftools.nd_algopy as nda
#(nonlinear least squares)
>>> xdata = np.reshape(np.arange(0,1,0.1),(-1,1))
>>> ydata = 1+2*np.exp(0.75*xdata)
>>> f = lambda c: (c[0]+c[1]*np.exp(c[2]*xdata) - ydata)**2
Jfun = nda.Jacobian(f) # Todo: This does not work
Jfun([1,2,0.75]).T # should be numerically zero
array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> Jfun2 = nda.Jacobian(f, method='reverse')
>>> Jfun2([1,2,0.75]).T # should be numerically zero
array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> f2 = lambda x : x[0]*x[1]*x[2] + np.exp(x[0])*x[1]
>>> Jfun3 = nda.Jacobian(f2)
>>> Jfun3([3.,5.,7.])
array([[ 135.42768462, 41.08553692, 15. ]])
>>> Jfun4 = nda.Jacobian(f2, method='reverse')
>>> Jfun4([3,5,7])
array([[ 135.42768462, 41.08553692, 15. ]])
""", see_also="""
See also
--------
Derivative
Gradient,
Hessdiag,
Hessian,
""")

def _forward(self, x, *args, **kwds):
# forward mode without building the computational graph
tmp = algopy.UTPM.init_jacobian(x)
y = self.f(tmp, *args, **kwds)
return np.atleast_2d(algopy.UTPM.extract_jacobian(y))

def _reverse(self, x, *args, **kwds):
x = np.atleast_1d(x)
c_graph = self.computational_graph(x, *args, **kwds)
return c_graph.jacobian(x)


class Gradient(_Derivative):
__doc__ = _cmn_doc % dict(
derivative='Gradient',
@@ -330,6 +269,64 @@ def _reverse(self, x, *args, **kwds):
return c_graph.gradient(x)


class Jacobian(Gradient):
__doc__ = _cmn_doc % dict(
derivative='Jacobian',
extra_parameter="",
extra_note="", returns="""
Returns
-------
jacob : array
Jacobian
""", example="""
Example
-------
>>> import numdifftools.nd_algopy as nda
#(nonlinear least squares)
>>> xdata = np.reshape(np.arange(0,1,0.1),(-1,1))
>>> ydata = 1+2*np.exp(0.75*xdata)
>>> f = lambda c: (c[0]+c[1]*np.exp(c[2]*xdata) - ydata)**2
Jfun = nda.Jacobian(f) # Todo: This does not work
Jfun([1,2,0.75]).T # should be numerically zero
array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> Jfun2 = nda.Jacobian(f, method='reverse')
>>> Jfun2([1,2,0.75]).T # should be numerically zero
array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> f2 = lambda x : x[0]*x[1]*x[2] + np.exp(x[0])*x[1]
>>> Jfun3 = nda.Jacobian(f2)
>>> Jfun3([3.,5.,7.])
array([[ 135.42768462, 41.08553692, 15. ]])
>>> Jfun4 = nda.Jacobian(f2, method='reverse')
>>> Jfun4([3,5,7])
array([[ 135.42768462, 41.08553692, 15. ]])
""", see_also="""
See also
--------
Derivative
Gradient,
Hessdiag,
Hessian,
""")

def _forward(self, x, *args, **kwds):
return np.atleast_2d(super(Jacobian, self)._forward(x, *args, **kwds))

def _reverse(self, x, *args, **kwds):
x = np.atleast_1d(x)
c_graph = self.computational_graph(x, *args, **kwds)
return c_graph.jacobian(x)


class Hessian(_Derivative):
__doc__ = _cmn_doc % dict(
derivative='Hessian',
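After this reorganization, Jacobian reuses Gradient's forward-mode machinery through super(), which is the duplicated code this commit removes. A short doctest-style usage sketch, not part of the diff (it assumes numdifftools.nd_algopy with algopy installed; the expected values are taken from the docstring example above):

>>> import numpy as np
>>> import numdifftools.nd_algopy as nda
>>> f = lambda x: x[0]*x[1]*x[2] + np.exp(x[0])*x[1]
>>> jac = nda.Jacobian(f)   # forward mode is the default
>>> jac([3., 5., 7.])
array([[ 135.42768462, 41.08553692, 15. ]])
>>> grad = nda.Gradient(f)  # the base class Jacobian now extends
>>> np.allclose(grad([3., 5., 7.]), jac([3., 5., 7.]))
True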
52 changes: 15 additions & 37 deletions numdifftools/run_benchmark.py
@@ -85,66 +85,44 @@ def loglimits(data, border=0.05):
hessian_funs[method2] = ndc_hessian(1, method=method, step=epsilon)


def compute_gradients(gradient_funs, problem_sizes):

results_gradient_list = []
def _compute_benchmark(functions, problem_sizes):
result_list = []
for n in problem_sizes:
print('n=', n)
num_methods = len(gradient_funs)
results_gradient = np.zeros((num_methods, 3))
num_methods = len(functions)
results = np.zeros((num_methods, 3))
ref_g = None
f = BenchmarkFunction(n)
for i, (_key, gradient_f) in enumerate(gradient_funs.items()):
for i, (_key, function) in enumerate(functions.items()):
t = timeit.default_timer()
gradient_f.f = f
function.f = f
preproc_time = timeit.default_timer() - t
t = timeit.default_timer()
x = 3 * np.ones(n)
val = gradient_f(x)
val = function(x)
run_time = timeit.default_timer() - t
if ref_g is None:
ref_g = val
err = 0
norm_ref_g = np.linalg.norm(ref_g)
else:
err = np.linalg.norm(val - ref_g) / norm_ref_g
results_gradient[i] = run_time, err, preproc_time
results[i] = run_time, err, preproc_time

results_gradient_list.append(results_gradient)
result_list.append(results)

results_gradients = np.array(results_gradient_list) + 1e-16
return np.array(result_list) + 1e-16

def compute_gradients(gradient_funs, problem_sizes):
print('starting gradient computation ')
results_gradients = _compute_benchmark(gradient_funs, problem_sizes)
print('results_gradients=\n', results_gradients)
return results_gradients


def compute_hessians(hessian_funs, problem_sizes):
print('starting hessian computation ')
results_hessian_list = []
for n in problem_sizes:
print('n=', n)
num_methods = len(hessian_funs)
results_hessian = np.zeros((num_methods, 3))
ref_h = None
f = BenchmarkFunction(n)
for i, (_key, hessian_f) in enumerate(hessian_funs.items()):
t = timeit.default_timer()
hessian_f.f = f
preproc_time = timeit.default_timer() - t
t = timeit.default_timer()
x = 3 * np.ones(n)
val = hessian_f(x)
run_time = timeit.default_timer() - t
if ref_h is None:
ref_h = val
err = 0.0
norm_ref_h = np.linalg.norm(ref_h.ravel())
else:
err = np.linalg.norm((val - ref_h).ravel()) / norm_ref_h
results_hessian[i] = run_time, err, preproc_time

results_hessian_list.append(results_hessian)

results_hessians = np.array(results_hessian_list) + 1e-16
results_hessians = _compute_benchmark(hessian_funs, problem_sizes)
print(problem_sizes)
print('results_hessians=\n', results_hessians)
return results_hessians
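For reference, a stand-alone sketch of the consolidation pattern applied above: one shared timing loop with thin per-quantity wrappers. The helper name time_all and the toy callables are hypothetical stand-ins, not part of numdifftools; the real _compute_benchmark additionally assigns the benchmark function to each differentiator's f attribute and records an error column against the first result.

import timeit

import numpy as np


def time_all(funs, problem_sizes):
    """Return run times (seconds) of each callable for each problem size."""
    rows = []
    for n in problem_sizes:
        x = 3.0 * np.ones(n)
        times = []
        for fun in funs.values():
            tic = timeit.default_timer()
            fun(x)
            times.append(timeit.default_timer() - tic)
        rows.append(times)
    return np.array(rows) + 1e-16  # keep values positive, e.g. for log-scale plots


def time_gradients(gradient_funs, problem_sizes):
    print('starting gradient computation')
    return time_all(gradient_funs, problem_sizes)


def time_hessians(hessian_funs, problem_sizes):
    print('starting hessian computation')
    return time_all(hessian_funs, problem_sizes)


if __name__ == '__main__':
    funs = {'sum': np.sum, 'norm': np.linalg.norm}
    print(time_gradients(funs, problem_sizes=(4, 16, 64)))

The wrappers keep the two public entry points and their log messages, while the measurement loop lives in one place.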
