diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index bce9d5aa2c..6736e298f0 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -4,8 +4,8 @@ on: [push, pull_request]
 
 jobs:
   run_test_site:
-    name: ${{ matrix.os }}/py${{ matrix.PYTHON_VERSION }}${{ matrix.LABEL }}
-    runs-on: ${{ matrix.os }}
+    name: ${{ matrix.os }}-py${{ matrix.PYTHON_VERSION }}${{ matrix.LABEL }}
+    runs-on: ${{ matrix.os }}-latest
     timeout-minutes: 30
     env:
       MPLBACKEND: agg
@@ -15,31 +15,39 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, windows-latest, macos-latest]
+        os: [ubuntu, windows, macos]
         PYTHON_VERSION: [3.7, 3.8]
        PIP_SELECTOR: ['[all, tests]']
+        UPSTREAM_DEV: [false]
         include:
          # test oldest supported version of main dependencies on python 3.6
-          - os: ubuntu-latest
+          - os: ubuntu
            PYTHON_VERSION: 3.6
            OLDEST_SUPPORTED_VERSION: true
-            DEPENDENCIES: matplotlib==2.2.3 numpy==1.15.4 scipy==1.1 imagecodecs==2019.12.3
+            DEPENDENCIES: matplotlib==2.2.3 numpy==1.16.0 scipy==1.1 imagecodecs==2019.12.3
            PIP_SELECTOR: '[all, tests]'
-            LABEL: /oldest
+            LABEL: -oldest
          # test minimum requirement
-          - os: ubuntu-latest
+          - os: ubuntu
            PYTHON_VERSION: 3.7
            PIP_SELECTOR: '[tests]'
-            LABEL: /minimum
+            LABEL: -minimum
          # Run coverage
-          - os: ubuntu-latest
+          - os: ubuntu
            PYTHON_VERSION: 3.7
            PIP_SELECTOR: '[all, tests, coverage]'
            PYTEST_ARGS_COVERAGE: --cov=. --cov-report=xml
-            LABEL: /coverage
+            LABEL: -coverage
+          # Run test suite against upstream development version
+          - os: ubuntu
+            PYTHON_VERSION: 3.8
+            PIP_SELECTOR: '[all, tests]'
+            LABEL: -upstream_dev
+            UPSTREAM_DEV: true
+            DEPENDENCIES: numpy scipy scikit-learn scikit-image
         exclude:
          # redundant build (same as coverage)
-          - os: ubuntu-latest
+          - os: ubuntu
            PYTHON_VERSION: 3.7
 
     steps:
@@ -65,6 +73,13 @@ jobs:
         run: |
           pip install ${{ matrix.DEPENDENCIES }}
 
+      - name: Install dependencies development version
+        if: ${{ matrix.UPSTREAM_DEV }}
+        run: |
+          pip install --upgrade --no-deps --pre \
+            -i https://pypi.anaconda.org/scipy-wheels-nightly/simple \
+            ${{ matrix.DEPENDENCIES }}
+
       - name: Run test suite
         run: |
           pytest ${{ env.PYTEST_ARGS }} ${{ matrix.PYTEST_ARGS_COVERAGE }}
@@ -96,3 +111,5 @@ jobs:
       - uses: actions/upload-artifact@v2
         with:
           path: ./doc/_build/*
+          name: doc_build
+
diff --git a/CHANGES.rst b/CHANGES.rst
index 9bb1eeca3c..25dd887eae 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -11,7 +11,8 @@ RELEASE_next_patch (Unreleased)
 * Widgets plotting improvement and add `pick_tolerance` to plot preferences (`#2615 <https://github.com/hyperspy/hyperspy/pull/2615>`_)
 * Update external links in the loading data section of the user guide (`#2627 <https://github.com/hyperspy/hyperspy/pull/2627>`_)
 * Pass keyword argument to the image IO plugins (`#2627 <https://github.com/hyperspy/hyperspy/pull/2627>`_)
-
+* Drop support for numpy<1.16, in line with NEP 29, and fix the protochips reader for numpy 1.20 (`#2616 <https://github.com/hyperspy/hyperspy/pull/2616>`_)
+* Run test suite against upstream dependencies (numpy, scipy, scikit-learn and scikit-image) (`#2616 <https://github.com/hyperspy/hyperspy/pull/2616>`_)
 
 Changelog
 *********
diff --git a/doc/dev_guide/testing.rst b/doc/dev_guide/testing.rst
index 8662b6ef32..406cf511cf 100644
--- a/doc/dev_guide/testing.rst
+++ b/doc/dev_guide/testing.rst
@@ -98,15 +98,15 @@ parallel on your machine.
 
    # To run on 2 cores
    $ pytest -n 2 --dist loadfile
 
-The ``--dist loadfile`` argument will group tests by their containing file. The
-groups are then distributed to available workers as whole units, thus guaranteeing
-that all tests in a file run in the same worker.
+The ``--dist loadfile`` argument will group tests by their containing file. The
+groups are then distributed to available workers as whole units, thus guaranteeing
+that all tests in a file run in the same worker.
 
 .. note::
 
    Running tests in parallel using ``pytest-xdist`` will change the content
    and format of the output of ``pytest`` to the console. We recommend installing
-   ```pytest-sugar`` <https://github.com/Teemu/pytest-sugar>`_ to produce
+   ```pytest-sugar`` <https://github.com/Teemu/pytest-sugar>`_ to produce
    nicer-looking output including an animated progressbar.
 
@@ -114,7 +114,7 @@ Flaky tests
 ^^^^^^^^^^^
 
 Test functions can sometimes exhibit intermittent or sporadic failure, with seemingly
-random or non-deterministic behaviour. They may sometimes pass or sometimes fail, and
+random or non-deterministic behaviour. They may sometimes pass or sometimes fail, and
 it won't always be clear why. These are usually known as "flaky" tests.
 
 One way to approach flaky tests is to rerun them, to see if the failure was a one-off.
@@ -128,7 +128,7 @@ This can be achieved using the ```pytest-rerunfailures`` plugin <https://github.com/pytest-dev/pytest-rerunfailures>`_.
@@ -204,7 +204,10 @@ The Github Actions testing matrix also includes the following special cases:
 
 - The test suite is run against the oldest supported versions of ``numpy``,
   ``matplotlib`` and ``scipy``. For more details, see this `Github issue `_.
-
+- The test suite is run against the development versions of ``numpy``,
+  ``scipy``, ``scikit-learn`` and ``scikit-image`` using the weekly build wheels
+  available on https://anaconda.org/scipy-wheels-nightly. For more details, see
+  this `Github issue `_.
 
 
 .. _plot-test-label:
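For context on the flaky-test guidance touched above: ``pytest-rerunfailures`` exposes a ``flaky`` marker that retries a test before reporting failure. A minimal sketch of how a test opts in (the test body and rerun counts are hypothetical, not part of this patch)::

    import random

    import pytest

    @pytest.mark.flaky(reruns=3, reruns_delay=2)
    def test_occasionally_fails():
        # Rerun up to 3 times, pausing 2 s between attempts,
        # before pytest reports a genuine failure.
        assert random.random() < 0.9
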
diff --git a/hyperspy/_components/gaussian.py b/hyperspy/_components/gaussian.py
index 557c3e3439..796aee8e0e 100644
--- a/hyperspy/_components/gaussian.py
+++ b/hyperspy/_components/gaussian.py
@@ -31,6 +31,7 @@ def _estimate_gaussian_parameters(signal, x1, x2, only_current):
     axis = signal.axes_manager.signal_axes[0]
     i1, i2 = axis.value_range_to_indices(x1, x2)
     X = axis.axis[i1:i2]
+
     if only_current is True:
         data = signal()[i1:i2]
         X_shape = (len(X),)
@@ -46,20 +47,11 @@ def _estimate_gaussian_parameters(signal, x1, x2, only_current):
         centre_shape = list(data.shape)
         centre_shape[i] = 1
 
-    if isinstance(data, da.Array):
-        _sum = da.sum
-        _sqrt = da.sqrt
-        _abs = abs
-    else:
-        _sum = np.sum
-        _sqrt = np.sqrt
-        _abs = np.abs
-
-    centre = _sum(X.reshape(X_shape) * data, i) / _sum(data, i)
-
-    sigma = _sqrt(_abs(_sum((X.reshape(X_shape) - centre.reshape(
-        centre_shape)) ** 2 * data, i) / _sum(data, i)))
+    centre = np.sum(X.reshape(X_shape) * data, i) / np.sum(data, i)
+    sigma = np.sqrt(np.abs(np.sum((X.reshape(X_shape) - centre.reshape(
+        centre_shape)) ** 2 * data, i) / np.sum(data, i)))
     height = data.max(i)
+
     if isinstance(data, da.Array):
         return da.compute(centre, height, sigma)
     else:
@@ -87,11 +79,11 @@ class Gaussian(Expression):
     Parameters
     -----------
     A : float
-        Height scaled by :math:`\sigma\sqrt{(2\pi)}`. ``GaussianHF``
-        implements the Gaussian function with a height parameter
+        Height scaled by :math:`\sigma\sqrt{(2\pi)}`. ``GaussianHF``
+        implements the Gaussian function with a height parameter
         corresponding to the peak height.
     sigma : float
-        Scale parameter of the Gaussian distribution.
+        Scale parameter of the Gaussian distribution.
     centre : float
         Location of the Gaussian maximum (peak position).
     **kwargs
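A note on the refactoring above: dropping the ``da.sum``/``np.sum`` branching appears to rely on dask arrays honouring NumPy's dispatch protocols (``__array_ufunc__``, and ``__array_function__``, the latter on by default from numpy 1.17), so the same NumPy calls stay lazy on dask data. A minimal sketch of the idea (array shapes are arbitrary)::

    import dask.array as da
    import numpy as np

    X = np.linspace(-1., 1., 100)
    data = da.random.random((4, 100), chunks=(2, 100))  # lazy signal data

    # Same expression _estimate_gaussian_parameters now uses; stays lazy.
    centre = np.sum(X * data, -1) / np.sum(data, -1)
    print(type(centre))        # dask.array.core.Array: nothing computed yet
    print(da.compute(centre))  # evaluates, as the estimator's final step does
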
diff --git a/hyperspy/_components/lorentzian.py b/hyperspy/_components/lorentzian.py
index de125b670c..bfe25b1e73 100644
--- a/hyperspy/_components/lorentzian.py
+++ b/hyperspy/_components/lorentzian.py
@@ -26,6 +26,7 @@ def _estimate_lorentzian_parameters(signal, x1, x2, only_current):
     axis = signal.axes_manager.signal_axes[0]
     i1, i2 = axis.value_range_to_indices(x1, x2)
     X = axis.axis[i1:i2]
+
     if only_current is True:
         data = signal()[i1:i2]
         i = 0
@@ -38,25 +39,16 @@ def _estimate_lorentzian_parameters(signal, x1, x2, only_current):
         centre_shape = list(data.shape)
         centre_shape[i] = 1
 
-    if isinstance(data, da.Array):
-        _cumsum = da.cumsum
-        _max = da.max
-        _abs = da.fabs
-        _argmin = da.argmin
-    else:
-        _cumsum = np.cumsum
-        _max = np.max
-        _abs = np.abs
-        _argmin = np.argmin
+    cdf = np.cumsum(data, i)
+    cdfnorm = cdf / np.max(cdf, i).reshape(centre_shape)
 
-    cdf = _cumsum(data,i)
-    cdfnorm = cdf/_max(cdf, i).reshape(centre_shape)
+    icentre = np.argmin(np.abs(0.5 - cdfnorm), i)
+    igamma1 = np.argmin(np.abs(0.75 - cdfnorm), i)
+    igamma2 = np.argmin(np.abs(0.25 - cdfnorm), i)
 
-    icentre = _argmin(_abs(0.5 - cdfnorm), i)
-    igamma1 = _argmin(_abs(0.75 - cdfnorm), i)
-    igamma2 = _argmin(_abs(0.25 - cdfnorm), i)
     if isinstance(data, da.Array):
         icentre, igamma1, igamma2 = da.compute(icentre, igamma1, igamma2)
+
     centre = X[icentre]
     gamma = (X[igamma1] - X[igamma2]) / 2
     height = data.max(i)
@@ -126,11 +118,11 @@ def __init__(self, A=1., gamma=1., centre=0., module="numexpr", **kwargs):
         self.convolved = True
 
     def estimate_parameters(self, signal, x1, x2, only_current=False):
-        """Estimate the Lorentzian by calculating the median (centre) and half
+        """Estimate the Lorentzian by calculating the median (centre) and half
         the interquartile range (gamma).
-
-        Note that an insufficient range will affect the accuracy of this
-        method.
+
+        Note that an insufficient range will affect the accuracy of this
+        method.
 
         Parameters
         ----------
diff --git a/hyperspy/_components/skew_normal.py b/hyperspy/_components/skew_normal.py
index b070cd9473..47c856a5b3 100644
--- a/hyperspy/_components/skew_normal.py
+++ b/hyperspy/_components/skew_normal.py
@@ -45,32 +45,20 @@ def _estimate_skewnormal_parameters(signal, x1, x2, only_current):
         x0_shape = list(data.shape)
         x0_shape[i] = 1
 
-    if isinstance(data, da.Array):
-        _sum = da.sum
-        _sqrt = da.sqrt
-        _abs = abs
-        _argmin = da.argmin
-
-    else:
-        _sum = np.sum
-        _sqrt = np.sqrt
-        _abs = np.abs
-        _argmin = np.argmin
-
     a1 = np.sqrt(2 / np.pi)
     b1 = (4 / np.pi - 1) * a1
-    m1 = _sum(X.reshape(X_shape) * data, i) / _sum(data, i)
-    m2 = _abs(_sum((X.reshape(X_shape) - m1.reshape(x0_shape)) ** 2 * data, i)
-              / _sum(data, i))
-    m3 = _abs(_sum((X.reshape(X_shape) - m1.reshape(x0_shape)) ** 3 * data, i)
-              / _sum(data, i))
+    m1 = np.sum(X.reshape(X_shape) * data, i) / np.sum(data, i)
+    m2 = np.abs(np.sum((X.reshape(X_shape) - m1.reshape(x0_shape)) ** 2 * data, i)
+                / np.sum(data, i))
+    m3 = np.abs(np.sum((X.reshape(X_shape) - m1.reshape(x0_shape)) ** 3 * data, i)
+                / np.sum(data, i))
 
     x0 = m1 - a1 * (m3 / b1) ** (1 / 3)
-    scale = _sqrt(m2 + a1 ** 2 * (m3 / b1) ** (2 / 3))
-    delta = _sqrt(1 / (a1**2 + m2 * (b1 / m3) ** (2 / 3)))
-    shape = delta / _sqrt(1 - delta**2)
+    scale = np.sqrt(m2 + a1 ** 2 * (m3 / b1) ** (2 / 3))
+    delta = np.sqrt(1 / (a1**2 + m2 * (b1 / m3) ** (2 / 3)))
+    shape = delta / np.sqrt(1 - delta**2)
 
-    iheight = _argmin(_abs(X.reshape(X_shape) - x0.reshape(x0_shape)), i)
+    iheight = np.argmin(np.abs(X.reshape(X_shape) - x0.reshape(x0_shape)), i)
     # height is the value of the function at x0, which has to be computed
     # differently for dask array (lazy) and depending on the dimension
     if isinstance(data, da.Array):
@@ -117,12 +105,12 @@ class SkewNormal(Expression):
 
     ============== =============
-    Variable        Parameter
+    Variable        Parameter
     ============== =============
-    :math:`x_0`     x0
-    :math:`A`       A
-    :math:`\omega`  scale
-    :math:`\alpha`  shape
+    :math:`x_0`     x0
+    :math:`A`       A
+    :math:`\omega`  scale
+    :math:`\alpha`  shape
     ============== =============
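The ``estimate_parameters`` docstring above leans on a property of the Lorentzian (Cauchy) distribution: the median gives the centre, and half the interquartile range gives ``gamma``. A small self-contained check of that identity, independent of this patch (sample size chosen arbitrarily)::

    import numpy as np

    rng = np.random.default_rng(0)
    # Cauchy samples with centre=5 and gamma=2
    samples = 5.0 + 2.0 * rng.standard_cauchy(100_000)

    q25, q50, q75 = np.percentile(samples, [25, 50, 75])
    print(q50)              # ~5: the median estimates the centre
    print((q75 - q25) / 2)  # ~2: half the IQR estimates gamma
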
diff --git a/hyperspy/io_plugins/protochips.py b/hyperspy/io_plugins/protochips.py
index 8d931e51fd..c07657ee6f 100644
--- a/hyperspy/io_plugins/protochips.py
+++ b/hyperspy/io_plugins/protochips.py
@@ -22,7 +22,6 @@
 from datetime import datetime as dt
 import warnings
 import logging
-from distutils.version import LooseVersion
 
 
 # Plugin characteristics
@@ -138,13 +137,10 @@ def _get_metadata_time_axis(self):
 
     def _read_data(self):
         names = [name.replace(' ', '_') for name in self.column_name]
-        # Necessary for numpy >= 1.14
-        kwargs = {'encoding': 'latin1'} if np.__version__ >= LooseVersion("1.14") else {
-        }
         data = np.genfromtxt(self.filename, delimiter=',', dtype=None,
                              names=names,
                              skip_header=self.header_last_line_number,
-                             unpack=True, **kwargs)
+                             encoding='latin1')
 
         self._data_dictionary = dict()
         for i, name, name_dtype in zip(range(len(names)), self.column_name,
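The ``protochips.py`` change works because ``np.genfromtxt`` has accepted an ``encoding`` argument directly since numpy 1.14, which the new ``numpy>=1.16`` floor guarantees, so the ``LooseVersion`` guard is dead code. A minimal sketch with made-up CSV content (column names and values are hypothetical; the degree sign shows why ``latin1`` matters for this kind of log file)::

    import io

    import numpy as np

    raw = "Time,Note\n0.0,25\xb0C\n0.1,26\xb0C\n".encode('latin1')
    data = np.genfromtxt(io.BytesIO(raw), delimiter=',', dtype=None,
                         names=True, encoding='latin1')
    print(data['Time'], data['Note'])  # non-ASCII bytes decoded cleanly
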
diff --git a/hyperspy/signal.py b/hyperspy/signal.py
index a4cb3cc24d..dbe16cc19a 100644
--- a/hyperspy/signal.py
+++ b/hyperspy/signal.py
@@ -3985,30 +3985,17 @@ def fft(self, shift=False, apodization=False, real_fft_only=False, **kwargs):
 
         use_real_fft = real_fft_only and (self.data.dtype.kind != 'c')
 
-        if isinstance(self.data, da.Array):
-            if use_real_fft:
-                fft_f = da.fft.rfftn
-            else:
-                fft_f = da.fft.fftn
-
-            if shift:
-                im_fft = self._deepcopy_with_new_data(da.fft.fftshift(
-                    fft_f(im_fft.data, axes=axes, **kwargs), axes=axes))
-            else:
-                im_fft = self._deepcopy_with_new_data(
-                    fft_f(self.data, axes=axes, **kwargs))
+        if use_real_fft:
+            fft_f = np.fft.rfftn
         else:
-            if use_real_fft:
-                fft_f = np.fft.rfftn
-            else:
-                fft_f = np.fft.fftn
+            fft_f = np.fft.fftn
 
-            if shift:
-                im_fft = self._deepcopy_with_new_data(np.fft.fftshift(
-                    fft_f(im_fft.data, axes=axes, **kwargs), axes=axes))
-            else:
-                im_fft = self._deepcopy_with_new_data(
-                    fft_f(self.data, axes=axes, **kwargs))
+        if shift:
+            im_fft = self._deepcopy_with_new_data(np.fft.fftshift(
+                fft_f(im_fft.data, axes=axes, **kwargs), axes=axes))
+        else:
+            im_fft = self._deepcopy_with_new_data(
+                fft_f(self.data, axes=axes, **kwargs))
 
         im_fft.change_dtype("complex")
         im_fft.metadata.General.title = 'FFT of {}'.format(
@@ -4079,21 +4066,12 @@ def ifft(self, shift=None, return_real=True, **kwargs):
 
         if shift is None:
             shift = self.metadata.get_item('Signal.FFT.shifted', False)
-        if isinstance(self.data, da.Array):
-            if shift:
-                fft_data_shift = da.fft.ifftshift(self.data, axes=axes)
-                im_ifft = self._deepcopy_with_new_data(
-                    da.fft.ifftn(fft_data_shift, axes=axes, **kwargs))
-            else:
-                im_ifft = self._deepcopy_with_new_data(da.fft.ifftn(
-                    self.data, axes=axes, **kwargs))
+        if shift:
+            im_ifft = self._deepcopy_with_new_data(np.fft.ifftn(
+                np.fft.ifftshift(self.data, axes=axes), axes=axes, **kwargs))
         else:
-            if shift:
-                im_ifft = self._deepcopy_with_new_data(np.fft.ifftn(
-                    np.fft.ifftshift(self.data, axes=axes), axes=axes, **kwargs))
-            else:
-                im_ifft = self._deepcopy_with_new_data(np.fft.ifftn(
-                    self.data, axes=axes, **kwargs))
+            im_ifft = self._deepcopy_with_new_data(np.fft.ifftn(
+                self.data, axes=axes, **kwargs))
 
         im_ifft.metadata.General.title = 'iFFT of {}'.format(
             im_ifft.metadata.General.title)
diff --git a/setup.py b/setup.py
index db0a769023..52d77cae94 100644
--- a/setup.py
+++ b/setup.py
@@ -47,7 +47,7 @@
 
 install_req = ['scipy>=1.1',
                'matplotlib>=2.2.3',
-               'numpy>=1.15.4',
+               'numpy>=1.16.0',
                'traits>=4.5.0',
                'natsort',
                'requests',
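After the ``signal.py`` simplification, ``np.fft.fftn``/``np.fft.ifftn`` are also called on lazy data, which presumably relies on dask arrays implementing NumPy's ``__array_function__`` protocol (NEP 18, enabled by default from numpy 1.17): the call dispatches to ``da.fft.fftn`` and the result stays lazy. A minimal sketch (shapes and chunking are arbitrary; dask requires the transformed axes to be single chunks)::

    import dask.array as da
    import numpy as np

    stack = da.random.random((4, 64, 64), chunks=(1, 64, 64))  # 4 lazy frames
    fft = np.fft.fftn(stack, axes=(1, 2))  # dispatches to da.fft.fftn
    print(type(fft))                       # still a dask array, still lazy
    print(np.fft.fftshift(fft, axes=(1, 2)).compute().shape)  # (4, 64, 64)
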