-
Notifications
You must be signed in to change notification settings - Fork 22
multidimensional kerneldiff and butterdiff #181
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Large diffs are not rendered by default.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -3,12 +3,12 @@ | |
| import scipy.signal | ||
|
|
||
| # included code | ||
| from pynumdiff.finite_difference import second_order as finite_difference | ||
| from pynumdiff.finite_difference import finitediff | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Why I wasn't doing this before, I'm not sure. |
||
| from pynumdiff.polynomial_fit import splinediff as _splinediff # patch through | ||
| from pynumdiff.utils import utility | ||
|
|
||
|
|
||
| def kerneldiff(x, dt, kernel='friedrichs', window_size=5, num_iterations=1): | ||
| def kerneldiff(x, dt, kernel='friedrichs', window_size=5, num_iterations=1, axis=0): | ||
| """Differentiate by applying a smoothing kernel to the signal, then performing 2nd-order finite difference. | ||
| :code:`meandiff`, :code:`mediandiff`, :code:`gaussiandiff`, and :code:`friedrichsdiff` call this function. | ||
|
|
||
|
|
@@ -18,23 +18,25 @@ def kerneldiff(x, dt, kernel='friedrichs', window_size=5, num_iterations=1): | |
| :code:`'friedrichs'`} | ||
| :param int window_size: filtering kernel size | ||
| :param int num_iterations: how many times to apply mean smoothing | ||
| :param int axis: data dimension along which differentiation is performed | ||
|
|
||
| :return: - **x_hat** (np.array) -- estimated (smoothed) x | ||
| - **dxdt_hat** (np.array) -- estimated derivative of x | ||
| """ | ||
| if kernel in ['mean', 'gaussian', 'friedrichs']: | ||
| kernel = getattr(utility, f"{kernel}_kernel")(window_size) | ||
| x_hat = utility.convolutional_smoother(x, kernel, num_iterations) | ||
| x_hat = utility.convolutional_smoother(x, kernel, num_iterations, axis=axis) | ||
| elif kernel == 'median': | ||
| if not window_size % 2: window_size += 1 # make sure window_size is odd, else medfilt throws error | ||
| s = [1]*x.ndim; s[axis] = window_size | ||
|
|
||
| x_hat = x | ||
| for _ in range(num_iterations): | ||
| x_hat = scipy.signal.medfilt(x_hat, window_size) | ||
| x_hat = scipy.signal.medfilt(x_hat, s) | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
|
||
| else: | ||
| raise ValueError("filter_type must be mean, median, gaussian, or friedrichs") | ||
|
|
||
| return finite_difference(x_hat, dt) | ||
| return finitediff(x_hat, dt, order=2, axis=axis) | ||
|
|
||
|
|
||
| def meandiff(x, dt, params=None, options={}, window_size=5, num_iterations=1): | ||
|
|
@@ -140,7 +142,7 @@ def friedrichsdiff(x, dt, params=None, options={}, window_size=5, num_iterations | |
| return kerneldiff(x, dt, kernel='friedrichs', window_size=window_size, num_iterations=num_iterations) | ||
|
|
||
|
|
||
| def butterdiff(x, dt, params=None, options={}, filter_order=2, cutoff_freq=0.5, num_iterations=1): | ||
| def butterdiff(x, dt, params=None, options={}, filter_order=2, cutoff_freq=0.5, num_iterations=1, axis=0): | ||
| """Perform butterworth smoothing on x with scipy.signal.filtfilt followed by second order finite difference | ||
|
|
||
| :param np.array[float] x: data to differentiate | ||
|
|
@@ -152,6 +154,7 @@ def butterdiff(x, dt, params=None, options={}, filter_order=2, cutoff_freq=0.5, | |
| :param float cutoff_freq: cutoff frequency :math:`\\in [0, 1]`. For a discrete vector, the | ||
| value is normalized to the range 0-1, where 1 is the Nyquist frequency. | ||
| :param int num_iterations: how many times to apply smoothing | ||
| :param int axis: data dimension along which differentiation is performed | ||
|
|
||
| :return: - **x_hat** (np.array) -- estimated (smoothed) x | ||
| - **dxdt_hat** (np.array) -- estimated derivative of x | ||
|
|
@@ -166,11 +169,11 @@ def butterdiff(x, dt, params=None, options={}, filter_order=2, cutoff_freq=0.5, | |
| b, a = scipy.signal.butter(filter_order, cutoff_freq) | ||
|
|
||
| x_hat = x | ||
| padlen = len(x)-1 if len(x) < 9 else None | ||
| padlen = x.shape[axis]-1 if x.shape[axis] < 9 else None | ||
| for _ in range(num_iterations): | ||
| x_hat = scipy.signal.filtfilt(b, a, x_hat, method="pad", padlen=padlen) # applies forward and backward pass so zero phase | ||
| x_hat = scipy.signal.filtfilt(b, a, x_hat, axis=axis, method="pad", padlen=padlen) # applies forward and backward pass so zero phase | ||
|
|
||
| return finite_difference(x_hat, dt) | ||
| return finitediff(x_hat, dt, order=2, axis=axis) | ||
|
|
||
|
|
||
| def splinediff(*args, **kwargs): # pragma: no cover pylint: disable=missing-function-docstring | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -2,13 +2,13 @@ | |
| import numpy as np | ||
| from pytest import mark | ||
|
|
||
| from ..smooth_finite_difference import kerneldiff, mediandiff, meandiff, gaussiandiff, friedrichsdiff, butterdiff | ||
| from ..finite_difference import finitediff, first_order, second_order, fourth_order | ||
| from ..linear_model import lineardiff | ||
| from ..basis_fit import spectraldiff, rbfdiff | ||
| from ..polynomial_fit import polydiff, savgoldiff, splinediff | ||
| from ..basis_fit import spectraldiff, rbfdiff | ||
| from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity, smooth_acceleration | ||
| from ..kalman_smooth import rtsdiff, constant_velocity, constant_acceleration, constant_jerk, robustdiff | ||
| from ..smooth_finite_difference import mediandiff, meandiff, gaussiandiff, friedrichsdiff, butterdiff | ||
| from ..linear_model import lineardiff | ||
| # Function aliases for testing cases where parameters change the behavior in a big way, so error limits can be indexed in dict | ||
| def iterated_second_order(*args, **kwargs): return second_order(*args, **kwargs) | ||
| def iterated_fourth_order(*args, **kwargs): return fourth_order(*args, **kwargs) | ||
|
|
@@ -34,13 +34,13 @@ def spline_irreg_step(*args, **kwargs): return splinediff(*args, **kwargs) | |
|
|
||
| # Call both ways, with kwargs (new) and with params list and optional options dict (legacy), to ensure both work | ||
| diff_methods_and_params = [ | ||
| (first_order, {}), (second_order, {}), (fourth_order, {}), # empty dictionary for the case of no parameters | ||
| (iterated_second_order, {'num_iterations':5}), (iterated_fourth_order, {'num_iterations':10}), | ||
| (meandiff, {'window_size':3, 'num_iterations':2}), (meandiff, [3, 2], {'iterate':True}), | ||
| (mediandiff, {'window_size':3, 'num_iterations':2}), (mediandiff, [3, 2], {'iterate':True}), | ||
| (gaussiandiff, {'window_size':5}), (gaussiandiff, [5]), | ||
| (friedrichsdiff, {'window_size':5}), (friedrichsdiff, [5]), | ||
| (butterdiff, {'filter_order':3, 'cutoff_freq':0.7}), (butterdiff, [3, 0.7]), | ||
| (first_order, {}), (second_order, {}), (fourth_order, {}), # empty dictionary for the case of no parameters | ||
| (iterated_second_order, {'num_iterations':5}), (iterated_fourth_order, {'num_iterations':10}), | ||
| (polydiff, {'degree':2, 'window_size':3}), (polydiff, [2, 3]), | ||
| (savgoldiff, {'degree':2, 'window_size':5, 'smoothing_win':5}), (savgoldiff, [2, 5, 5]), | ||
| (splinediff, {'degree':5, 's':2}), (splinediff, [5, 2]), | ||
|
|
@@ -150,7 +150,7 @@ def spline_irreg_step(*args, **kwargs): return splinediff(*args, **kwargs) | |
| [(0, 0), (1, 1), (0, 0), (1, 1)], | ||
| [(1, 0), (2, 2), (1, 0), (2, 2)], | ||
| [(1, 0), (3, 3), (1, 0), (3, 3)]], | ||
| spectraldiff: [[(-15, -15), (-14, -15), (0, -1), (0, 0)], | ||
| spectraldiff: [[(-15, -15), (-14, -14), (0, -1), (0, 0)], | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
|
||
| [(0, 0), (1, 1), (0, 0), (1, 1)], | ||
| [(1, 1), (1, 1), (1, 1), (1, 1)], | ||
| [(0, 0), (1, 1), (0, 0), (1, 1)], | ||
|
|
@@ -304,13 +304,19 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re | |
|
|
||
| # When one day all or most methods support multidimensionality, and the legacy way of calling methods is | ||
| # gone, diff_methods_and_params can be used for the multidimensionality test as well | ||
| multidim_methods_and_params = [(finitediff, {})] | ||
| multidim_methods_and_params = [ | ||
| (kerneldiff, {'kernel': 'gaussian', 'window_size': 5}), | ||
| (butterdiff, {'filter_order': 3, 'cutoff_freq': 1 - 1e-6}), | ||
| (finitediff, {}), | ||
| ] | ||
|
|
||
| # Similar to the error_bounds table, index by method first. But then we test against only one 2D function, | ||
| # and only in the absence of noise, since the other test covers that. Instead, because multidimensional | ||
| # derivatives can be combined in interesting fashions, we find d^2 / dt_1 dt_2 and the Laplacian, | ||
| # d^2/dt_1^2 + d^2/dt_2^2. Tuples are again (L2,Linf) distances. | ||
| multidim_error_bounds = { | ||
| kerneldiff: [(2, 1), (3, 2)], | ||
| butterdiff: [(0, -1), (1, -1)], | ||
| finitediff: [(0, -1), (1, -1)] | ||
| } | ||
|
|
||
|
|
@@ -364,3 +370,4 @@ def test_multidimensionality(multidim_method_and_params, request): | |
| ax2.plot_wireframe(T1, T2, computed_d2) | ||
| ax3.plot_wireframe(T1, T2, computed_laplacian, label='computed') | ||
| legend = ax3.legend(bbox_to_anchor=(0.7, 0.8)); legend.legend_handles[0].set_facecolor(pyplot.cm.viridis(0.6)) | ||
| fig.suptitle(f'{diff_method.__name__}', fontsize=16) | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -3,6 +3,7 @@ | |
| from scipy.integrate import cumulative_trapezoid | ||
| from scipy.optimize import minimize | ||
| from scipy.stats import median_abs_deviation, norm | ||
| from scipy.ndimage import convolve1d | ||
|
|
||
|
|
||
| def huber(x, M): | ||
|
|
@@ -95,21 +96,20 @@ def friedrichs_kernel(window_size): | |
| return ker / np.sum(ker) | ||
|
|
||
|
|
||
| def convolutional_smoother(x, kernel, num_iterations=1): | ||
| def convolutional_smoother(x, kernel, num_iterations=1, axis=0): | ||
| """Perform smoothing by convolving x with a kernel. | ||
|
|
||
| :param np.array[float] x: 1D data | ||
| :param np.array[float] kernel: kernel to use in convolution | ||
| :param int num_iterations: number of iterations, >=1 | ||
| :param int axis: data dimension along which convolution is performed | ||
|
|
||
| :return: **x_hat** (np.array[float]) -- smoothed x | ||
| """ | ||
| pad_width = len(kernel)//2 | ||
| x_hat = x | ||
|
|
||
| for i in range(num_iterations): | ||
| x_padded = np.pad(x_hat, pad_width, mode='symmetric') # pad with repetition of the edges | ||
| x_hat = np.convolve(x_padded, kernel, 'valid')[:len(x)] # 'valid' slices out only full-overlap spots | ||
| x_hat = convolve1d(x_hat, kernel, axis=axis, mode='reflect') # 'reflect' pads the signal with repeats | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This function is great. No more need to pad things ourselves or carefully slice out the answer to ensure same-length. |
||
|
|
||
| return x_hat | ||
|
|
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Calling attention to the fact we support multidimensional data.