diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100755 index 0000000000..ac7f33baa9 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,16 @@ +## Discussions + +For discussions on the use of hyperspy, please use the [mailing list](http://groups.google.com/group/hyperspy-users): useful to discuss use cases of HyperSpy and ask about specific applications. + +Discussion on the [gitter chat](https://gitter.im/hyperspy/hyperspy): useful to ask quick questions. + +## Issues + +The [issue tracker](https://github.com/hyperspy/hyperspy/issues) can be used to report bugs or propose new features. When reporting a bug, the following is useful: +- give a minimal example demonstrating the bug, +- copy and paste the error traceback. + +## Contribute + +If you want to contribute to the HyperSpy source code, you can send us a [pull requests](https://github.com/hyperspy/hyperspy/pulls). For more information, please read the [developer guide](http://hyperspy.org/hyperspy-doc/current/dev_guide.html). + diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100755 index 0000000000..e69de29bb2 diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100755 index 0000000000..c26fcb22a9 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,29 @@ +### Requirements +* Read the [developer guide](http://hyperspy.org/hyperspy-doc/current/dev_guide.html). +* Filling out the template; it helps the review process and it is useful to summarise the PR. +* This template can be updated during the progression of the PR to summarise its status. + +*You can delete this section after you read it.* + +### Description of the change +A few sentences and/or a bulleted list to describe and motivate the change: +- Change A. +- Change B. +- etc. + +### Progress of the PR +- [ ] Change implemented (can be split into several points), +- [ ] update docstring (if appropriate), +- [ ] update user guide (if appropriate), +- [ ] add tests, +- [ ] ready for review. + +### Minimal example of the bug fix or the new feature +```python +>>> import hyperspy.api as hs +>>> import numpy as np +>>> s = hs.signals.Signal1D(np.arange(10)) +>>> # Your new feature... +``` +Note that this example can be useful to update the user guide. + diff --git a/.travis.yml b/.travis.yml index 223911efc9..c7dcbbbffa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -57,6 +57,16 @@ script: - pip install coverage coveralls pytest pytest-cov pytest-mpl; py.test --mpl --cov=hyperspy --pyargs hyperspy; +#after_failure: # run only on failure in case there is a need to check matplotlib image comparison +# This needs a service to upload the artifacts (and corresponding configuration) +# See: +# https://blog.travis-ci.com/2012-12-18-travis-artifacts/ +# https://docs.travis-ci.com/user/uploading-artifacts/ +# https://github.com/matplotlib/matplotlib/blob/master/.travis.yml +# - gem install travis-artifacts +# - zip -r image_comparison_failure.zip /tmp/ -i *.png +# - travis-artifacts upload image_comparison_failure.zip + after_success: - coveralls - if [[ $MINIMAL_ENV == 'False' ]]; then diff --git a/README.rst b/README.rst index b948ad3f1d..6f18ea897b 100644 --- a/README.rst +++ b/README.rst @@ -8,8 +8,8 @@ .. |AppVeyor| image:: https://ci.appveyor.com/api/projects/status/github/hyperspy/hyperspy?svg=true&branch=RELEASE_next_patch .. _AppVeyor: https://ci.appveyor.com/project/hyperspy/hyperspy/branch/RELEASE_next_patch -.. 
|Coveralls| image:: https://coveralls.io/repos/github/hyperspy/hyperspy/badge.svg?branch=RELEASE_next_patch -.. _Coveralls: https://coveralls.io/github/hyperspy/hyperspy?branch=RELEASE_next_patch +.. |Coveralls| image:: https://coveralls.io/repos/github/hyperspy/hyperspy/badge.svg?branch=RELEASE_next_minor +.. _Coveralls: https://coveralls.io/github/hyperspy/hyperspy?branch=RELEASE_next_minor .. |pypi_version| image:: http://img.shields.io/pypi/v/hyperspy.svg?style=flat .. _pypi_version: https://pypi.python.org/pypi/hyperspy @@ -44,6 +44,13 @@ HyperSpy is released under the GPL v3 license. **Since version 0.8.4 HyperSpy only supports Python 3. If you need to install HyperSpy in Python 2.7 install HyperSpy 0.8.3.** + +Contributing +------------ + +Everyone is welcome to contribute. Please read our +`contributing guidelines `_ and get started! + Cite ---- diff --git a/appveyor.yml b/appveyor.yml index 8999893032..b0cb11f398 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -87,6 +87,11 @@ artifacts: - path: dist\*.whl name: win_wheels +on_failure: + - ps: $Env:image_comparison_filename = 'image_comparison_' + $Env:PYTHON_VERSION + '_' + $Env:PYTHON_ARCH + 'bits.zip' + - ps: 7z a -r $Env:image_comparison_filename C:\Users\appveyor\AppData\Local\Temp\1\*png + - ps: Push-AppveyorArtifact $Env:image_comparison_filename + deploy: provider: GitHub auth_token: diff --git a/doc/api/hyperspy.datasets.rst b/doc/api/hyperspy.datasets.rst index 86e5207e22..65d04b35b9 100644 --- a/doc/api/hyperspy.datasets.rst +++ b/doc/api/hyperspy.datasets.rst @@ -13,6 +13,15 @@ hyperspy.datasets.example\_signals module :show-inheritance: +hyperspy\.datasets\.artificial\_data module +------------------------------------------- + +.. automodule:: hyperspy.datasets.artificial_data + :members: + :undoc-members: + :show-inheritance: + + Module contents --------------- diff --git a/doc/api/hyperspy.io_plugins.rst b/doc/api/hyperspy.io_plugins.rst index 608fe4377c..93c2f35c7e 100644 --- a/doc/api/hyperspy.io_plugins.rst +++ b/doc/api/hyperspy.io_plugins.rst @@ -84,8 +84,16 @@ hyperspy.io\_plugins.mrc module :undoc-members: :show-inheritance: -hyperspy.io\_plugins.msa module -------------------------------- +hyperspy\.io\_plugins\.mrcz module +--------------------------------- + +.. automodule:: hyperspy.io_plugins.mrcz + :members: + :undoc-members: + :show-inheritance: + +hyperspy\.io\_plugins\.msa module +--------------------------------- .. automodule:: hyperspy.io_plugins.msa :members: diff --git a/doc/conf.py b/doc/conf.py index 75408ad06d..4d2f6e021b 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -232,7 +232,7 @@ # Add the hyperspy website to the intersphinx domains intersphinx_mapping = {'hyperspyweb': ('http://hyperspy.org/', None), - 'matplotlib': ('https://matplotlib.org', None)} + 'matplotlib': ('https://matplotlib.org', None)} def setup(app): diff --git a/doc/dev_guide.rst b/doc/dev_guide.rst index 8d2df7feb8..11d9890989 100644 --- a/doc/dev_guide.rst +++ b/doc/dev_guide.rst @@ -206,6 +206,9 @@ Useful hints on testing: your PR. There should be a link to it at the bottom of your PR on the github PR page. This service can help you to find how well your code is being tested and exactly which part is not currently tested. +* `pytest-sugar `_ can be installed + to have a nicer look and feel of py.test in the console (encoding issue have + been reported in the Windows console). .. _plot-test-label: @@ -241,6 +244,9 @@ after execution. 
 It is necessary to delete the figure and this can be done by using the ``mpl_cleanup`` fixture:
 you just need to add ``mpl_cleanup`` as the first argument of the tests.
+When the plotting tests are failing, it is possible to download the figure comparison images generated by pytest-mpl from the `artifacts tab `_ of the corresponding build.
+
+
 The plotting tests need matplotlib > 2.0.0, since the matplotlib
 `style change `_ between matplotlib 1.x and 2.x is enough to make the
 tests fail. Freetype>=2.8 is also required to pass the
@@ -269,6 +275,10 @@ specification
     >>> s = hs.load("file.hspy", lazy=True)
+    >>> ssum = s.sum(axis=0)
+    >>> ssum.compute(close_file=True)  # closes the file.hspy file
+
+
 Lazy stacking
 ^^^^^^^^^^^^^
diff --git a/doc/user_guide/electron_holography.rst b/doc/user_guide/electron_holography.rst
index 83625ed70c..5c0574a148 100644
--- a/doc/user_guide/electron_holography.rst
+++ b/doc/user_guide/electron_holography.rst
@@ -133,3 +133,44 @@ assignment by
 :py:meth:`~._signals.hologram_image.HologramImage.estimate_sideband_position` and
 :py:meth:`~._signals.hologram_image.HologramImage.estimate_sideband_size` methods.
 This, however, is not recommended for inexperienced users.
+
+
+Getting hologram statistics
+---------------------------
+There are many reasons to access parameters of holograms that describe the quality of the data.
+:meth:`~._signals.hologram_image.HologramImage.statistics` can be used to calculate the carrier frequency and
+fringe spacing and to estimate the fringe contrast. The method outputs a dictionary with the values listed above,
+each given in several units. In particular, the fringe spacing is calculated in pixels (fringe sampling) as well as in
+calibrated units. The carrier frequency is calculated in inverse pixels or calibrated units, as well as in radians.
+The fringe contrast is estimated either by dividing the standard deviation of the hologram by its mean value, or
+in Fourier space as twice the ratio of the amplitude at the sideband centre to the amplitude of the centre band (i.e. the FFT origin).
+The first method is the default and requires the interference fringes to cover the entire field of view; it is
+highly sensitive to any artifacts in the hologram such as dead pixels and
+Fresnel fringes. The second method is less sensitive to such artifacts and gives a
+reasonable estimate of the fringe contrast even if the hologram does not cover the entire field of view, but it is highly
+sensitive to the precise calculation of the sideband position and may therefore sometimes underestimate the contrast.
+The algorithm can be selected with the ``fringe_contrast_algorithm`` parameter, setting it to
+``'statistical'`` or ``'fourier'``. The sideband position is typically provided via the ``sb_position`` parameter.
+The statistics can be accessed as follows:
+
+.. code-block:: python
+
+    >>> statistics = im.statistics(sb_position=sb_position)
+
+Note that by default the ``single_value`` parameter is ``True``, which forces the output of single values for each
+entry of the statistics dictionary, calculated from the first navigation pixel. (I.e. for image stacks only the first image
+will be used for calculating the statistics.) Otherwise:
+
+.. code-block:: python
+
+    >>> statistics = im.statistics(sb_position=sb_position, single_value=False)
+
+Entries of ``statistics`` are then HyperSpy signals containing the hologram parameters for each image in the stack.
+
+The estimation of fringe spacing with the ``'fourier'`` method applies apodization in real space prior to calculating the FFT.
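+
+For example, a minimal sketch (assuming ``im`` and ``sb_position`` are defined as in the
+examples above), selecting the Fourier-space estimate:
+
+.. code-block:: python
+
+    >>> statistics = im.statistics(sb_position=sb_position,
+    ...                            fringe_contrast_algorithm='fourier')
+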
+By default ``apodization`` parameter is set to ``hanning`` which applies Hanning window. Other options are using either +``None`` or ``hamming`` for no apodization or Hamming window. Please note that for experimental conditions +especially with extreme sampling of fringes and strong contrast variation due to Fresnel effects +the calculated fringe contrast provides only an estimate and the values may differ strongly depending on apodization. + +For further information see documentation of :meth:`~._signals.hologram_image.HologramImage.statistics`. \ No newline at end of file diff --git a/doc/user_guide/getting_started.rst b/doc/user_guide/getting_started.rst index 92f858df84..c63d8361e0 100644 --- a/doc/user_guide/getting_started.rst +++ b/doc/user_guide/getting_started.rst @@ -209,6 +209,17 @@ signals: >>> hs.datasets.example_signals.EDS_TEM_Spectrum().plot() +.. versionadded:: 1.4 + :py:mod:`~.datasets.artificial_data` + +There are also artificial datasets, which are made to resemble real +experimental data. + +.. code-block:: python + + >>> s = hs.datasets.artificial_data.get_core_loss_eels_signal() + >>> s.plot() + .. _eelsdb-label: .. versionadded:: 1.0 @@ -409,6 +420,56 @@ navigation dimensions: >>> s.axes_manager.indices = (5, 4) +.. _quantity_and_converting_units: + +Using quantity and converting units +------------------------------------------- + +The scale and the offset of each axis can be set and retrieved as quantity. + +.. code-block:: python + + >>> s = hs.signals.Signal1D(np.arange(10)) + >>> s.axes_manager[0].scale_as_quantity + 1.0 dimensionless + >>> s.axes_manager[0].scale_as_quantity = '2.5 µm' + >>> s.axes_manager + + Name | size | index | offset | scale | units + ================ | ====== | ====== | ======= | ======= | ====== + ---------------- | ------ | ------ | ------- | ------- | ------ + | 10 | | 0 | 2.5 | µm + >>> s.axes_manager[0].offset_as_quantity = '2.5 nm' + + Name | size | index | offset | scale | units + ================ | ====== | ====== | ======= | ======= | ====== + ---------------- | ------ | ------ | ------- | ------- | ------ + | 10 | | 2.5 | 2.5e+03 | nm + + +Internally, HyperSpy uses the `pint `_ library to manage the scale and offset quantities. The ``scale_as_quantity`` and ``offset_as_quantity`` attributes return pint object: + +.. code-block:: python + + >>> q = s.axes_manager[0].offset_as_quantity + >>> type(q) # q is a pint quantity object + pint.quantity.build_quantity_class..Quantity + >>> q + 2.5 nanometer + + +The ``convert_units`` method of the :py:class:`~.axes.AxesManager` converts units, which by default (no parameters provided) converts all axis units to an optimal units to avoid using too large or small number. + +Each axis can also be converted individually using the ``convert_to_units`` method of the :py:class:`~.axes.DataAxis`: + +.. code-block:: python + + >>> axis = hs.hyperspy.axes.DataAxis(size=10, scale=0.1, offset=10, units='mm') + >>> axis.scale_as_quantity + 0.1 millimeter + >>> axis.convert_to_units('µm') + >>> axis.scale_as_quantity + 100.0 micrometer .. 
_saving: diff --git a/doc/user_guide/images/hologram_fft.png b/doc/user_guide/images/hologram_fft.png new file mode 100644 index 0000000000..d512f95d77 Binary files /dev/null and b/doc/user_guide/images/hologram_fft.png differ diff --git a/doc/user_guide/images/plot_images_eds.png b/doc/user_guide/images/plot_images_eds.png index ede2e07e84..b93398328c 100644 Binary files a/doc/user_guide/images/plot_images_eds.png and b/doc/user_guide/images/plot_images_eds.png differ diff --git a/doc/user_guide/images/plot_images_eds_cmap_factors_side_by_side.png b/doc/user_guide/images/plot_images_eds_cmap_factors_side_by_side.png new file mode 100644 index 0000000000..3fa01b6c45 Binary files /dev/null and b/doc/user_guide/images/plot_images_eds_cmap_factors_side_by_side.png differ diff --git a/doc/user_guide/images/plot_images_eds_cmap_list.png b/doc/user_guide/images/plot_images_eds_cmap_list.png new file mode 100644 index 0000000000..6098154c27 Binary files /dev/null and b/doc/user_guide/images/plot_images_eds_cmap_list.png differ diff --git a/doc/user_guide/images/plot_images_subplots.png b/doc/user_guide/images/plot_images_subplots.png index b86a4aff7b..5c595a8ec5 100644 Binary files a/doc/user_guide/images/plot_images_subplots.png and b/doc/user_guide/images/plot_images_subplots.png differ diff --git a/doc/user_guide/install.rst b/doc/user_guide/install.rst index 2f2329d861..805f45e26c 100644 --- a/doc/user_guide/install.rst +++ b/doc/user_guide/install.rst @@ -31,7 +31,7 @@ Bundle. This is a customised `WinPython `_ distribution that includes HyperSpy, all its dependencies and many other scientific Python packages. -For details and download links go to https://github.com/hyperspy/hyperspy-bundle +For details and download links go to https://github.com/hyperspy/hyperspy-bundle .. _quick-anaconda-install: @@ -128,6 +128,9 @@ Alternatively you can select the extra functionalities required: * ``gui-traitsui`` to install required libraries to use the GUI elements based on `traitsui `_ * ``test`` to install required libraries to run HyperSpy's unit tests. +* ``lazy_FEI_EMD`` to install required libraries to load FEI spectrum imgages + lazily. +* ``mrcz-blosc`` to install the blosc library to use compression with the mrcz plugin. * ``doc`` to install required libraries to build HyperSpy's documentation. For example: @@ -254,6 +257,15 @@ is going to be installed from source, Cython is also required. Also, to compile the documentation sphinxcontrib-napoleon and sphinx_rtd_theme are required. +In case some of the required libraries are not automatically installed when +installing from source in a conda environment, these can be obtained beforehand +by installing and removing hyperspy from that environment; + +.. code-block:: bash + $ conda install hyperspy + $ conda remove hyperspy + $ sudo pip install -e ./ + .. _known-issues: Known issues diff --git a/doc/user_guide/io.rst b/doc/user_guide/io.rst index d1cca31b18..18a42325fa 100644 --- a/doc/user_guide/io.rst +++ b/doc/user_guide/io.rst @@ -75,6 +75,11 @@ Almost all file readers support accessing the data without reading it to memory analysing large files. To load a file without loading it to memory simply set ``lazy`` to ``True`` e.g.: +The units of the navigation and signal axes can be converted automatically +during loading using the ``convert_units`` parameter. If `True`, the +``convert_to_units`` method of the ``axes_manager`` will be used for the conversion +and if set to `False`, the units will not be converted. The default is `False`. + .. 
code-block:: python >>> s = hs.load("filename.hspy", lazy=True) @@ -181,6 +186,8 @@ HyperSpy. The "lazy" column specifies if lazy evaluation is supported. +--------------------+--------+--------+--------+ | MRC | Yes | No | Yes | +--------------------+--------+--------+--------+ + | MRCZ | Yes | Yes | Yes | + +--------------------+--------+--------+--------+ | EMSA/MSA | Yes | Yes | No | +--------------------+--------+--------+--------+ | NetCDF | Yes | No | No | @@ -195,7 +202,11 @@ HyperSpy. The "lazy" column specifies if lazy evaluation is supported. +--------------------+--------+--------+--------+ | Bruker's bcf | Yes | No | Yes | +--------------------+--------+--------+--------+ - | EMD (Berkley Labs) | Yes | Yes | Yes | + | Bruker's spx | Yes | No | No | + +--------------------+--------+--------+--------+ + | EMD (NCEM) | Yes | Yes | Yes | + +--------------------+--------+--------+--------+ + | EMD (FEI) | Yes | No | Yes | +--------------------+--------+--------+--------+ | Protochips log | Yes | No | No | +--------------------+--------+--------+--------+ @@ -300,9 +311,7 @@ passing ``chunks=True`` results in ``(7, 7, 256)`` chunks. Extra saving arguments ^^^^^^^^^^^^^^^^^^^^^^^ -compression: One of None, 'gzip', 'szip', 'lzf'. - -'gzip' is the default +- `compression` : One of None, 'gzip', 'szip', 'lzf' (default is 'gzip'). .. _netcdf-format: @@ -337,6 +346,67 @@ loading the file using a different mode (default is copy-on-write) . However, note that lazy loading does not support in-place writing (i.e lazy loading and the "r+" mode are incompatible). +.. _mrcz-format: + +MRCZ +---- + +MRCZ is an extension of the CCP-EM MRC2014 file format. `CCP-EM MRC2014 +`_ file format. It uses the +`blosc` meta-compression library to bitshuffle and compress files in a blocked, +multi-threaded environment. The supported data types are: + +[`float32`,`int8`,`uint16`,`int16`,`complex64`] + +It supports arbitrary meta-data, which is serialized into JSON. + +MRCZ also supports asychronous reads and writes. + +Repository: https://github.com/em-MRCZ +PyPI: https://pypi.python.org/pypi/mrcz +Citation: Submitted. +Preprint: http://www.biorxiv.org/content/early/2017/03/13/116533 + +Extra saving arguments +^^^^^^^^^^^^^^^^^^^^^^ + +`do_async`: currently supported within Hyperspy for writing only, this will save + the file in a background thread and return immediately. Defaults + to `False`. +.. Warning:: + + There is no method currently implemented within Hyperspy to tell if an + asychronous write has finished. + +`compressor`: The compression codec, one of [`None`,`'zlib`',`'zstd'`, `'lz4'`]. + Defaults to `None`. +`clevel`: The compression level, an `int` from 1 to 9. Defaults to 1. +`n_threads`: The number of threads to use for `blosc` compression. Defaults to + the maximum number of virtual cores (including Intel Hyperthreading) + on your system, which is recommended for best performance. If \ + `do_asyc = True` you may wish to leave one thread free for the + Python GIL. + +The recommended compression codec is 'zstd' (zStandard) with `clevel=1` for +general use. If speed is critical, use 'lz4' (LZ4) with `clevel=9`. Integer data +compresses more redably than floating-point data, and in general the histogram +of values in the data reflects how compressible it is. + +To save files that are compatible with other programs that can use MRC such as +GMS, IMOD, Relion, MotionCorr, etc. save with `compressor=None`, extension `.mrc`. 
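+
+For example, a minimal sketch of such a compatibility-mode save (the file name is illustrative):
+
+.. code-block:: python
+
+    >>> s.save('for_other_software.mrc', compressor=None)
+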
+JSON metadata will not be recognized by other MRC-supporting software but should +not cause crashes. + +Example Usage +^^^^^^^^^^^^^ + +.. code-block:: python + + >>> s.save('file.mrcz', do_async=True, compressor='zstd', clevel=1) + + >>> new_signal = hs.load('file.mrcz') + + .. _msa-format: EMSA/MSA @@ -508,6 +578,19 @@ available publicly available from EDAX and are on Github SpcMap-spd.file.format.pdf>`_, and `.ipr `_). +Extra loading arguments for ``.spd`` file +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- `spc_fname` : {None, str}, name of file from which to read the spectral calibration. If data was exported fully from EDAX TEAM software, an .spc file with the same name as the .spd should be present. If `None`, the default filename will be searched for. Otherwise, the name of the ``.spc`` file to use for calibration can be explicitly given as a string. +- `ipr_fname` : {None, str}, name of file from which to read the spatial calibration. If data was exported fully from EDAX TEAM software, an ``.ipr`` file with the same name as the ``.spd`` (plus a "_Img" suffix) should be present. If `None`, the default filename will be searched for. Otherwise, the name of the ``.ipr`` file to use for spatial calibration can be explicitly given as a string. +- **kwargs: remaining arguments are passed to the Numpy ``memmap`` function. + +Extra loading arguments for ``.spd`` and ``.spc`` files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- `load_all_spc` : bool, switch to control if all of the ``.spc`` header is read, or just the important parts for import into HyperSpy. + + .. _fei-format: FEI TIA ser and emi @@ -579,11 +662,16 @@ header in a plain-text format. The reader extracts the measured temperature along the time axis, as well as the date and calibration constants stored in the header. +Bruker's formats +---------------- +Bruker's Esprit(TM) software and hardware allows to acquire and save the data +in different kind of formats. Hyperspy can read two main basic formats: bcf +and spx. .. _bcf-format: Bruker composite file ----------------- +^^^^^^^^^^^^^^^^^^^^^ HyperSpy can read "hypermaps" saved with Bruker's Esprit v1.x or v2.x in bcf hybrid (virtual file system/container with xml and binary data, optionally @@ -600,22 +688,12 @@ Note that Bruker Esprit uses a similar format for EBSD data, but it is not currently supported by HyperSpy. Extra loading arguments -^^^^^^^^^^^^^^^^^^^^^^^ -select_type: one of (None, 'spectrum', 'image'). If specified, only the corresponding -type of data, either spectrum or image, is returned. By default (None), all data are loaded. - -index: one of (None, int, "all"). Allow to select the index of the dataset in the bcf file, -which can contains several datasets. Default None value result in loading the first dataset. -When set to 'all', all available datasets will be loaded and returned as separate signals. ++++++++++++++++++++++++ -downsample: the downsample ratio of hyperspectral array (height and width only), -can be integer >=1, where '1' results in no downsampling (default 1). The -underlying method of downsampling is unchangeable: sum. Differently than -block_reduce from skimage.measure it is memory efficient (does not creates -intermediate arrays, works inplace). - -cutoff_at_kV: if set (can be int or float >= 0) can be used either to crop or -enlarge energy (or channels) range at max values. (default None) +- `select_type` : one of (None, 'spectrum', 'image'). 
If specified, only the corresponding type of data, either spectrum or image, is returned. By default (None), all data are loaded. +- `index` : one of (None, int, "all"). Allow to select the index of the dataset in the bcf file, which can contains several datasets. Default None value result in loading the first dataset. When set to 'all', all available datasets will be loaded and returned as separate signals. +- `downsample` : the downsample ratio of hyperspectral array (height and width only), can be integer >=1, where '1' results in no downsampling (default 1). The underlying method of downsampling is unchangeable: sum. Differently than block_reduce from skimage.measure it is memory efficient (does not creates intermediate arrays, works inplace). +- `cutoff_at_kV` : if set (can be int or float >= 0) can be used either to crop or enlarge energy (or channels) range at max values (default None). Example of loading reduced (downsampled, and with energy range cropped) "spectrum only" data from bcf (original shape: 80keV EDS range (4096 channels), @@ -631,8 +709,8 @@ load the same file without extra arguments: .. code-block:: python >>> hs.load("sample80kv.bcf") - [, - , + [, + , ] The loaded array energy dimension can by forced to be larger than the data @@ -641,25 +719,144 @@ recorded by setting the 'cutoff_at_kV' kwarg to higher value: .. code-block:: python >>> hs.load("sample80kv.bcf", cutoff_at_kV=80) - [, - , + [, + , ] Note that setting downsample to >1 currently locks out using SEM imagery as navigator in the plotting. +.. _spx-format: + +SPX format +^^^^^^^^^^ + +Hyperspy can read Bruker's spx format (single spectra format based on XML). +The format contains extensive list of details and parameters of EDS analyses +which are mapped in hyperspy to metadata and original_metadata dictionaries. .. _emd-format: -EMD Electron Microscopy Datasets (HDF5) ---------------------------------------- +EMD +--- EMD stands for “Electron Microscopy Dataset.” It is a subset of the open source HDF5 wrapper format. N-dimensional data arrays of any standard type can be -stored in an HDF5 file, as well as tags and other metadata. The EMD format was -developed at Lawrence Berkeley National Lab (see http://emdatasets.com/ for -more information). NOT to be confused with the FEI EMD format which was -developed later and has a different structure. +stored in an HDF5 file, as well as tags and other metadata. + +EMD (NCEM) +^^^^^^^^^^ + +This EMD format was developed by Colin Ophus at the National Center for +Electron Microscopy (NCEM). See http://emdatasets.com/ for more information. + +For files containing several datasets, the `dataset_name` argument can be +used to select a specific one: + +.. code-block:: python + + >>> s = hs.load("adatafile.emd", dataset_name="/experimental/science_data_1") + + +Or several by using a list: + +.. code-block:: python + + >>> s = hs.load("adatafile.emd", + ... dataset_name=[ + ... "/experimental/science_data_1", + ... "/experimental/science_data_1"]) + + +EMD (FEI) +^^^^^^^^^ + +This is a non-compliant variant of the standard EMD format developed by FEI. +HyperSpy supports importing images, EDS spectrum and EDS +spectrum streams (spectrum images stored in a sparse format). For spectrum +streams, there are several loading options (described below) to control the frames +and detectors to load and if to sum them on loading. The default is +to import the sum over all frames and over all detectors in order to decrease +the data size in memory. 
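+
+As a minimal sketch (the file name is illustrative; the available loading options are
+described below), one can for instance load only the image datasets of such a file:
+
+.. code-block:: python
+
+    >>> images = hs.load("sample.emd", select_type='image')
+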
+
+Note that pruned FEI EMD files only contain the spectrum image in a proprietary
+format that HyperSpy cannot read. Therefore,
+don't prune FEI EMD files if you intend to read them with HyperSpy.
+Note also that loading a spectrum image can be slow if `numba
+`_ is not installed.
+
+.. code-block:: python
+
+    >>> hs.load("sample.emd")
+    [,
+    ]
+
+
+.. note::
+
+    To enable lazy loading of EDX spectrum images in this format it may be
+    necessary to install `sparse `_.
+    See also :ref:`install-with-python-installers`. Note also that currently
+    only lazy uncompression rather than lazy loading is implemented. This
+    means that it is not currently possible to read EDX SI FEI EMD files with
+    size bigger than the available memory.
+
+
+
+.. warning::
+
+    This format is still not stable and files generated with the most recent
+    version of Velox may not be supported. If you experience issues loading
+    a file, please report it to the HyperSpy developers so that they can
+    add support for newer versions of the format.
+
+.. _Extra-loading-arguments-fei-emd:
+
+Extra loading arguments
++++++++++++++++++++++++
+
+- `select_type` : one of {None, 'image', 'single_spectrum', 'spectrum_image'} (default is None).
+- `first_frame` : integer (default is 0).
+- `last_frame` : integer (default is None).
+- `sum_frames` : boolean (default is True).
+- `sum_EDS_detectors` : boolean (default is True).
+- `rebin_energy` : integer (default is 1).
+- `SI_dtype` : numpy dtype (default is None).
+- `load_SI_image_stack` : boolean (default is False).
+
+The ``select_type`` parameter specifies the type of data to load: if `image` is selected,
+only images (including EDS maps) are loaded; if `single_spectrum` is selected, only
+single spectra are loaded; and if `spectrum_image` is selected, only the spectrum
+image will be loaded. The ``first_frame`` and ``last_frame`` parameters can be used
+to select the frame range of the EDS spectrum image to load. To load each individual
+EDS frame, use ``sum_frames=False`` and the EDS spectrum image will be loaded
+with an extra navigation dimension corresponding to the frame index
+(time axis). Use the ``sum_EDS_detectors=False`` parameter to load the signal of
+each individual EDS detector. In that case, a corresponding number of distinct
+EDS signals is returned. The default is ``sum_EDS_detectors=True``, which loads the
+EDS signal as a sum over the signals from all EDS detectors. The ``rebin_energy``
+and ``SI_dtype`` parameters are particularly useful in combination with
+``sum_frames=False`` to reduce the data size when one wants to read the
+individual frames of the spectrum image. If ``SI_dtype=None`` (default), the dtype
+of the data in the emd file is used. The ``load_SI_image_stack`` parameter allows
+loading the stack of STEM images acquired simultaneously with the EDS spectrum image.
+This can be useful to monitor any specimen changes during the acquisition or to
+correct the spatial drift in the spectrum image by using the STEM images.
+
+.. code-block:: python
+
+    >>> hs.load("sample.emd", sum_EDS_detectors=False)
+    [,
+    ,
+    ,
+    ,
+    ]
+
+    >>> hs.load("sample.emd", sum_frames=False, load_SI_image_stack=True, SI_dtype=np.int8, rebin_energy=4)
+    [,
+    ]
+
+
 ..
_protochips-format: diff --git a/doc/user_guide/metadata_structure.rst b/doc/user_guide/metadata_structure.rst index 4165e270a5..d10a0cadb6 100644 --- a/doc/user_guide/metadata_structure.rst +++ b/doc/user_guide/metadata_structure.rst @@ -127,12 +127,13 @@ time_zone time type: Str - The acquisition or creation time in ISO 8601 time format. + The acquisition or creation time in ISO 8601 time format, e.g. '13:29:10'. date type: Str - The acquisition or creation date in ISO 8601 date format + The acquisition or creation date in ISO 8601 date format, e.g. + '2018-01-28'. authors diff --git a/doc/user_guide/model.rst b/doc/user_guide/model.rst index b8bd00a0c0..5d8f771c86 100644 --- a/doc/user_guide/model.rst +++ b/doc/user_guide/model.rst @@ -641,34 +641,37 @@ algorithms, see the .. table:: Features of curve fitting optimizers. - +--------------------------+--------+------------------+------------+--------+ - | Optimizer | Bounds | Error estimation | Method | Type | - +==========================+========+==================+============+========+ - | "leastsq" | Yes | Yes | 'ls' | local | - +--------------------------+--------+------------------+------------+--------+ - | "mpfit" | Yes | Yes | 'ls' | local | - +--------------------------+--------+------------------+------------+--------+ - | "odr" | No | Yes | 'ls' | local | - +--------------------------+--------+------------------+------------+--------+ - | "Nelder-Mead" | No | No | 'ls', 'ml' | local | - +--------------------------+--------+------------------+------------+--------+ - | "Powell" | No | No | 'ls', 'ml' | local | - +--------------------------+--------+------------------+------------+--------+ - | "CG" | No | No | 'ls', 'ml' | local | - +--------------------------+--------+------------------+------------+--------+ - | "BFGS" | No | No | 'ls', 'ml' | local | - +--------------------------+--------+------------------+------------+--------+ - | "Newton-CG" | No | No | 'ls', 'ml' | local | - +--------------------------+--------+------------------+------------+--------+ - | "L-BFGS-B" | Yes | No | 'ls', 'ml' | local | - +--------------------------+--------+------------------+------------+--------+ - | "TNC" | Yes | No | 'ls', 'ml' | local | - +--------------------------+--------+------------------+------------+--------+ - | "Differential Evolution" | Yes | No | 'ls', 'ml' | global | - +--------------------------+--------+------------------+------------+--------+ - - -The following example shows how to perform least squares with error estimation. 
+ +--------------------------+--------+------------------+----------------------+--------+ + | Optimizer | Bounds | Error estimation | Method | Type | + +==========================+========+==================+======================+========+ + | "leastsq" | Yes | Yes | 'ls' | local | + +--------------------------+--------+------------------+----------------------+--------+ + | "mpfit" | Yes | Yes | 'ls' | local | + +--------------------------+--------+------------------+----------------------+--------+ + | "odr" | No | Yes | 'ls' | local | + +--------------------------+--------+------------------+----------------------+--------+ + | "Nelder-Mead" | No | No | 'ls', 'ml', 'custom' | local | + +--------------------------+--------+------------------+----------------------+--------+ + | "Powell" | No | No | 'ls', 'ml', 'custom' | local | + +--------------------------+--------+------------------+----------------------+--------+ + | "CG" | No | No | 'ls', 'ml', 'custom' | local | + +--------------------------+--------+------------------+----------------------+--------+ + | "BFGS" | No | No | 'ls', 'ml', 'custom' | local | + +--------------------------+--------+------------------+----------------------+--------+ + | "Newton-CG" | No | No | 'ls', 'ml', 'custom' | local | + +--------------------------+--------+------------------+----------------------+--------+ + | "L-BFGS-B" | Yes | No | 'ls', 'ml', 'custom' | local | + +--------------------------+--------+------------------+----------------------+--------+ + | "TNC" | Yes | No | 'ls', 'ml', 'custom' | local | + +--------------------------+--------+------------------+----------------------+--------+ + | "Differential Evolution" | Yes | No | 'ls', 'ml', 'custom' | global | + +--------------------------+--------+------------------+----------------------+--------+ + + +Least squares with error estimation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following example shows how to perfom least squares optimisation with error estimation. First we create data consisting of a line line ``y = a*x + b`` with ``a = 1`` and ``b = 100`` and we add white noise to it: @@ -712,6 +715,10 @@ However, the value won't be correct unless an accurate value of the variance is defined in ``metadata.Signal.Noise_properties.variance``. See :ref:`signal.noise_properties` for more information. + +Weighted least squares with error estimation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + In the following example, we add poissonian noise to the data instead of gaussian noise and proceed to fit as in the previous example. @@ -744,6 +751,10 @@ approximation in most cases. >>> line.coefficients.std (0.0055752036447948173, 0.46950832982673557) + +Maximum likelihood optimisation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + We can use Poisson maximum likelihood estimation instead, which is an unbiased estimator for poissonian noise. To do so, we use a general optimizer called "Nelder-Mead". @@ -754,6 +765,96 @@ To do so, we use a general optimizer called "Nelder-Mead". >>> line.coefficients.value (1.0030718094185611, -0.63590210946134107) +Custom optimisations +^^^^^^^^^^^^^^^^^^^^ + +.. versionadded:: 1.4 Custom optimiser functions + +Instead of the in-built least squares (``'ls'``) and maximum likelihood +(``'ml'``) optimisation functions, a custom function can be passed to the +model: + +.. code-block:: python + + >>> def my_custom_function(model, values, data, weights=None): + ... """ + ... Parameters + ... ---------- + ... model : Model instance + ... the model that is fitted. + ... 
values : np.ndarray + ... A one-dimensional array with free parameter values suggested by the + ... optimiser (that are not yet stored in the model). + ... data : np.ndarray + ... A one-dimensional array with current data that is being fitted. + ... weights : {np.ndarray, None} + ... An optional one-dimensional array with parameter weights. + ... + ... Returns + ... ------- + ... score : float + ... A signle float value, representing a score of the fit, with + ... lower values corresponding to better fits. + ... """ + ... # Almost any operation can be performed, for example: + ... # First we store the suggested values in the model + ... model.fetch_values_from_array(values) + ... + ... # Evaluate the current model + ... cur_value = model(onlyactive=True) + ... + ... # Calculate the weighted difference with data + ... if weights is None: + ... weights = 1 + ... difference = (data - cur_value) * weights + ... + ... # Return squared and summed weighted difference + ... return (difference**2).sum() + >>> m.fit(fitter='TNC', method='custom', min_function=my_custom_function) + +If the optimiser requires a gradient estimation function, it can be similarly +passed, using the following signature: + +.. code-block:: python + + >>> def my_custom_gradient_function(model, values, data, weights=None): + ... """ + ... Parameters + ... ---------- + ... model : Model instance + ... the model that is fitted. + ... values : np.ndarray + ... A one-dimensional array with free parameter values suggested by the + ... optimiser (that are not yet stored in the model). + ... data : np.ndarray + ... A one-dimensional array with current data that is being fitted. + ... weights : {np.ndarray, None} + ... An optional one-dimensional array with parameter weights. + ... + ... Returns + ... ------- + ... gradients : np.ndarray + ... a one-dimensional array of gradients, the size of `values`, + ... containing each parameter gradient with the given values + ... """ + ... # As an example, estimate maximum likelihood gradient: + ... model.fetch_values_from_array(values) + ... cur_value = model(onlyactive=True) + ... + ... # We use in-built jacobian estimation + ... jac = model._jacobian(values, data) + ... + ... return -(jac * (data / cur_value - 1)).sum(1) + >>> m.fit(method='custom', + ... grad=True, + ... fitter='BFGS', # an optimiser that requires gradient estimation + ... min_function=my_custom_function, + ... min_function_grad=my_custom_gradient_function) + + +Bounded optimisation +^^^^^^^^^^^^^^^^^^^^ + Problems of ill-conditioning and divergence can be ameliorated by using bounded optimization. Currently, not all optimizers support bounds - see the :ref:`table above `. In the following example a gaussian @@ -780,7 +881,8 @@ component using mpfit and bounds on the ``centre`` parameter. A 99918.7 centre 9.99976 - +Goodness of fit +^^^^^^^^^^^^^^^ .. versionadded:: 0.7 chi-squared and reduced chi-squared @@ -804,7 +906,10 @@ To visualise the result use the :py:meth:`~.model.BaseModel.plot` method: >>> m.plot() # Visualise the results -.. versionadded:: 0.7 + + + +.. versionadded:: 0.7 plot componets features By default only the full model line is displayed in the plot. In addition, it is possible to display the individual components by calling @@ -821,6 +926,17 @@ To disable this feature call .. versionadded:: 0.7.1 :py:meth:`~.model.BaseModel.suspend_update` .. and :py:meth:`~.model.Model.resume_update` +.. 
versionadded:: 1.4 ``Signal1D.plot`` keyword arguments + +All extra keyword argments are passes to the :meth:`plot` method of the +corresponing signal object. For example, the following plots the model signal +figure but not its navigator: + +.. code-block:: python + + >>> m.plot(navigator=False) + + By default the model plot is automatically updated when any parameter value changes. It is possible to suspend this feature with :py:meth:`~.model.BaseModel.suspend_update`. diff --git a/doc/user_guide/signal1d.rst b/doc/user_guide/signal1d.rst index 9570ff6659..e39a2ccecb 100644 --- a/doc/user_guide/signal1d.rst +++ b/doc/user_guide/signal1d.rst @@ -49,7 +49,8 @@ Background removal ------------------ The :py:meth:`~.signal.Signal1D.remove_background` method provides -background removal capabilities through both a CLI and a GUI. Current +background removal capabilities through both a CLI and a GUI. The GUI displays +an interactive preview of the remainder after background subtraction. Current background type supported are power law, offset, polynomial and gaussian. By default the background is estimated, but a full fit can also be used. The full fit is more accurate, but slower. diff --git a/doc/user_guide/tools.rst b/doc/user_guide/tools.rst index 76197ad516..4543c65014 100644 --- a/doc/user_guide/tools.rst +++ b/doc/user_guide/tools.rst @@ -453,6 +453,7 @@ features differ from numpy): + Allow independent indexing of signal and navigation dimensions + Support indexing with decimal numbers. + + Support indexing with units. + Use the image order for indexing i.e. [x, y, z,...] (HyperSpy) vs [...,z,y,x] (numpy) @@ -494,8 +495,8 @@ First consider indexing a single spectrum, which has only one signal dimension >>> s.isig[5::2].data array([5, 7, 9]) - -Unlike numpy, HyperSpy supports indexing using decimal numbers, in which case +Unlike numpy, HyperSpy supports indexing using decimal numbers or string +(containing a decimal number and an units), in which case HyperSpy indexes using the axis scales instead of the indices. .. code-block:: python @@ -514,7 +515,9 @@ HyperSpy indexes using the axis scales instead of the indices. array([1, 2, 3]) >>> s.isig[0.5:4:2].data array([1, 3]) - + >>> s.axes_manager[0].units = 'µm' + >>> s.isig[:'2000 nm'].data + array([0, 1, 2, 3]) Importantly the original :py:class:`~.signal.BaseSignal` and its "indexed self" share their data and, therefore, modifying the value of the data in one @@ -915,6 +918,21 @@ The execution can be sped up by passing ``parallel`` keyword to the >>> s.map(slow_func, parallel=True) 100%|██████████████████████████████████████| 20/20 [00:02<00:00, 6.73it/s] +.. versionadded:: 1.4 + Iterating over signal using a parameter with no navigation dimension. + +In this case, the parameter is cyclically iterated over the navigation +dimension of the input signal. In the example below, signal s is +multiplied by a cosine parameter d, which is repeated over the +navigation dimension of s. + +.. code-block:: python + + >>> s = hs.signals.Signal1D(np.random.rand(10, 512)) + >>> d = hs.signals.Signal1D(np.cos(np.linspace(0., 2*np.pi, 512))) + >>> s.map(lambda A, B: A * B, B=d) + 100%|██████████| 10/10 [00:00<00:00, 2573.19it/s] + Cropping ^^^^^^^^ @@ -1048,6 +1066,30 @@ to reverse the :py:func:`~.utils.stack` function: Splitting example. +FFT and iFFT +^^^^^^^^^^^^ + +The Fast Fourier transform and its inverse can be applied on a signal with the :py:meth:`~.signal.BaseSignal.fft` and the :py:meth:`~.signal.BaseSignal.ifft` methods. + +.. 
code-block:: python + + >>> import numpy as np + >>> im = hs.datasets.example_signals.object_hologram() + >>> np.log(im.fft(shifted=True).amplitude).plot() + +.. figure:: images/hologram_fft.png + :align: center + :width: 400 + +Note that for visual inspection of FFT it is common to plot logarithm of amplitude rather than FFT itself as it is done + in the example above. + +By default both methods calculate FFT and IFFT with origin at (0, 0) (not in the centre of FFT). Use `shifted=True` option to +calculate FFT and the inverse with origin shifted in the centre. + +.. code-block:: python + + >>> im_ifft = im.fft(shifted=True).ifft(shifted=True) .. _signal.change_dtype: @@ -1210,7 +1252,7 @@ The convenience methods :py:meth:`~.signal.BaseSignal.as_signal1D` and :py:meth:`~.signal.BaseSignal.transpose`, but always optimize the data for iteration over the navigation axes if required. Hence, these methods do not always return a view of the original data. If a copy of the data is required -use +use :py:meth:`~.signal.BaseSignal.deepcopy` on the output of any of these methods e.g.: diff --git a/doc/user_guide/visualisation.rst b/doc/user_guide/visualisation.rst index c0c10a3b80..bbf567e72f 100644 --- a/doc/user_guide/visualisation.rst +++ b/doc/user_guide/visualisation.rst @@ -7,10 +7,10 @@ Data visualization The object returned by :py:func:`~.io.load`, a :py:class:`~.signal.BaseSignal` instance, has a :py:meth:`~.signal.BaseSignal.plot` method that is powerful and -flexible tools to visualize n-dimensional data. In this chapter, the -visualisation of multidimensional data is exemplified with two experimental +flexible to visualize n-dimensional data. In this chapter, the +visualisation of multidimensional data is exemplified with two experimental datasets: an EELS spectrum image and an EDX dataset consisting of a secondary -electron emission image stack and a 3D hyperspectrum , both simultaneously +electron emission image stack and a 3D hyperspectral image, both simultaneously acquired by recording two signals in parallel in a FIB/SEM. @@ -50,12 +50,24 @@ the x-axis if 1D: To change the current coordinates, click on the pointer (which will be a line or a square depending on the dimensions of the data) and drag it around. It is also possible to move the pointer by using the numpad arrows **when numlock is -on and the spectrum or navigator figure is selected**.When using the numpad -arrows the PageUp and PageDown keys change the size of the step. +on and the spectrum or navigator figure is selected**. When using the keyboard +arrows the PageUp and PageDown keys change the stepsize. An extra cursor can be added by pressing the ``e`` key. Pressing ``e`` once more will disable the extra cursor: +In matplotlib, left and right arrow keys are by default set to navigate the +"zoom" history. To avoid the problem of changing zoom while navigating, +Ctrl + arrows can be used instead. Navigating without using the modifier keys +will be deprecated in version 2.0. + +To navigate navigation dimensions larger than 2, modifier keys can be used. +The defaults are Shift + left/right and Shift + up/down, (Alt + left/right and Alt + up/down) +for navigating dimensions 2 and 3 (4 and 5) respectively. Modifier keys do not work with the numpad. + +Hotkeys and modifier keys for navigating the plot can be set in the ``hs.preferences.gui()``. +Note that some combinations will not work for all platforms, as some systems reserve them for +other purposes. .. _second_pointer.png: .. 
figure:: images/second_pointer.png @@ -69,17 +81,19 @@ can be too small to be dragged or even seen. It is possible to change the size of the cursors by pressing the ``+`` and ``-`` keys **when the navigator window is selected**. -========= ============================= -key function -========= ============================= -e Switch second pointer on/off -Arrows Change coordinates -PageUp Increase step size -PageDown Decrease step size -``+`` Increase pointer size -``-`` Decrease pointer size -``h`` Launch the contrast adjustment tool (only for Signal2D) -========= ============================= +======================= ============================= +key function +======================= ============================= +e Switch second pointer on/off +Ctrl + Arrows Change coordinates for dimensions 0 and 1 (typically x and y) +Shift + Arrows Change coordinates for dimensions 2 and 3 +Alt + Arrows Change coordinates for dimensions 4 and 5 +PageUp Increase step size +PageDown Decrease step size +``+`` Increase pointer size +``-`` Decrease pointer size +``h`` Launch the contrast adjustment tool (only for Signal2D) +======================= ============================= To close all the figures run the following command: @@ -182,11 +196,11 @@ them as a dictionary in ``navigator_kwds`` argument when plotting: .. versionadded:: 0.8.1 When plotting using divergent colormaps, if ``centre_colormap`` is ``True`` -(default) the constrast is automatically adjusted so that zero corresponds to +(default) the contrast is automatically adjusted so that zero corresponds to the center of the colormap (usually white). This can be useful e.g. when displaying images that contain pixels with both positive and negative values. -The following example shows the effect of centering the color map: +The following example shows the effect of centring the color map: .. code-block:: python @@ -548,8 +562,99 @@ which is used to call subplots_adjust method of matplotlib .. NOTE:: This padding can also be changed interactively by clicking on the - |subplots_adjust| button in the GUI (button may be different when - using different graphical backends). + |subplots_adjust| button in the GUI (button may be different when using + different graphical backends). + +Finally, the ``cmap`` option of :py:func:`~.drawing.utils.plot_images` +supports iterable types, allowing the user to specify different colormaps +for the different images that are plotted by providing a list or other +generator: + +.. code-block:: python + + >>> si_EDS = hs.load("core_shell.hdf5") + >>> im = si_EDS.get_lines_intensity() + >>> hs.plot.plot_images(hs.transpose(im[0], im[1]), + >>> tight_layout=True, cmap=['viridis', 'plasma'], axes_decor='off', + >>> colorbar='multi', saturated_pixels=2, scalebar=[0], + >>> scalebar_color='white', suptitle_fontsize=16) + +.. figure:: images/plot_images_eds_cmap_list.png + :align: center + :width: 500 + + Using :py:func:`~.drawing.utils.plot_images` to plot the output of + :py:meth:`~._signals.eds.EDS_mixin.get_lines_intensity` using a unique + colormap for each image. + +The ``cmap`` argument can also be given as ``'mpl_colors'``, and as a result, +the images will be plotted with colormaps generated from the default +``matplotlib`` colors, which is very helpful when plotting multiple spectral +signals and their relative intensities (such as the results of a +:py:func:`~.learn.mva.decomposition` analysis). This example uses +:py:func:`~.drawing.utils.plot_spectra`, which is explained in the +`next section`__. 
+ +__ plot.spectra_ + +.. code-block:: python + + >>> si_EDS = hs.load("core_shell.hdf5") + >>> si_EDS.change_dtype('float') + >>> si_EDS.decomposition(True, algorithm='nmf', output_dimension=3) + >>> factors = si_EDS.get_decomposition_factors() + >>> + >>> # the first factor is a very strong carbon background component, so we + >>> # normalize factor intensities for easier qualitative comparison + >>> for f in factors: + >>> f.data /= f.data.max() + >>> + >>> loadings = si_EDS.get_decomposition_loadings() + >>> + >>> hs.plot.plot_spectra(factors.isig[:14.0], style='cascade', + >>> padding=-1) + >>> + >>> # add some lines to nicely label the peak positions + >>> plt.axvline(6.403, c='C2', ls=':', lw=0.5) + >>> plt.text(x=6.503, y=0.85, s='Fe-K$_\\alpha$', color='C2') + >>> plt.axvline(9.441, c='C1', ls=':', lw=0.5) + >>> plt.text(x=9.541, y=0.85, s='Pt-L$_\\alpha$', color='C1') + >>> plt.axvline(2.046, c='C1', ls=':', lw=0.5) + >>> plt.text(x=2.146, y=0.85, s='Pt-M', color='C1') + >>> plt.axvline(8.040, ymax=0.8, c='k', ls=':', lw=0.5) + >>> plt.text(x=8.14, y=0.35, s='Cu-K$_\\alpha$', color='k') + >>> + >>> hs.plot.plot_images(loadings, cmap='mpl_colors', + >>> axes_decor='off', per_row=1, + >>> label=['Background', 'Pt core', 'Fe shell'], + >>> scalebar=[0], scalebar_color='white', + >>> padding={'top': 0.95, 'bottom': 0.05, + >>> 'left': 0.05, 'right':0.78}) + + +.. figure:: images/plot_images_eds_cmap_factors_side_by_side.png + :align: center + :width: 500 + + Using :py:func:`~.drawing.utils.plot_images` with ``cmap='mpl_colors'`` + together with :py:func:`~.drawing.utils.plot_spectra` to visualize the + output of a non-negative matrix factorization of the EDS data. + + +.. NOTE:: + + Because it does not make sense, it is not allowed to use a list or + other iterable type for the ``cmap`` argument together with ``'single'`` + for the ``colorbar`` argument. Such an input will cause a warning and + instead set the ``colorbar`` argument to ``None``. + +.. versionadd: 1.4 + Double-clicking into an axis in the panel created by ``plot_images`` + triggers a plot event, creating a new figure in which the selected signal is + presented alone. This helps navigating through panels with many figures by + selecting and enlarging some of them and allowing comfortable zooming. This + functionality is only enabled if a ``matplotlib`` backend that supports the + ``button_press_event`` in the figure canvas is being used. .. _plot.spectra: diff --git a/hyperspy/Release.py b/hyperspy/Release.py index 110a856837..eb77e700d8 100644 --- a/hyperspy/Release.py +++ b/hyperspy/Release.py @@ -25,7 +25,7 @@ # When running setup.py the ".dev" string will be replaced (if possible) # by the output of "git describe" if git is available or the git # hash if .git is present. -version = "1.3.2" +version = "1.4.dev" description = "Multidimensional data analysis toolbox" license = 'GPL v3' diff --git a/hyperspy/_components/volume_plasmon_drude.py b/hyperspy/_components/volume_plasmon_drude.py index 56bd27adf7..ee33306c96 100644 --- a/hyperspy/_components/volume_plasmon_drude.py +++ b/hyperspy/_components/volume_plasmon_drude.py @@ -23,7 +23,7 @@ class VolumePlasmonDrude(Component): - r"""Drude volume plasmon energy loss function component, the energy loss + r"""Drude volume plasmon energy loss function component, the energy loss function is defined as: .. 
math:: diff --git a/hyperspy/_signals/common_signal1d.py b/hyperspy/_signals/common_signal1d.py index 5c1edbc95d..d6dd84be65 100644 --- a/hyperspy/_signals/common_signal1d.py +++ b/hyperspy/_signals/common_signal1d.py @@ -28,7 +28,7 @@ class CommonSignal1D(object): def to_signal2D(self, optimize=True): """Returns the one dimensional signal as a two dimensional signal. - By default ensures the data is stored optimally, hence often making a + By default ensures the data is stored optimally, hence often making a copy of the data. See `transpose` for a more general method with more options. diff --git a/hyperspy/_signals/dielectric_function.py b/hyperspy/_signals/dielectric_function.py index 56682f9bcb..b95d51ab32 100644 --- a/hyperspy/_signals/dielectric_function.py +++ b/hyperspy/_signals/dielectric_function.py @@ -122,6 +122,7 @@ def get_electron_energy_loss_spectrum(self, zlp, t): data = ((-1 / self.data).imag * eels_constant(self, zlp, t).data * self.axes_manager.signal_axes[0].scale) s = self._deepcopy_with_new_data(data) + s.data = s.data.real s.set_signal_type("EELS") s.metadata.General.title = ("EELS calculated from " + self.metadata.General.title) diff --git a/hyperspy/_signals/eds.py b/hyperspy/_signals/eds.py index 00f14f8af9..34263a0f24 100644 --- a/hyperspy/_signals/eds.py +++ b/hyperspy/_signals/eds.py @@ -195,13 +195,15 @@ def rebin(self, new_shape=None, scale=None, crop=True, out=None): elif "Acquisition_instrument.TEM.Detector.EDS.real_time" in m.metadata: aimd.TEM.Detector.EDS.real_time *= time_factor else: - _logger.info("real_time could not be found in the metadata and has not been updated.") + _logger.info( + "real_time could not be found in the metadata and has not been updated.") if "Acquisition_instrument.SEM.Detector.EDS.live_time" in m.metadata: aimd.SEM.Detector.EDS.live_time *= time_factor elif "Acquisition_instrument.TEM.Detector.EDS.live_time" in m.metadata: aimd.TEM.Detector.EDS.live_time *= time_factor else: - _logger.info("Live_time could not be found in the metadata and has not been updated.") + _logger.info( + "Live_time could not be found in the metadata and has not been updated.") if out is None: return m @@ -479,7 +481,7 @@ def _get_lines_from_elements(self, only_lines = utils_eds._parse_only_lines(only_lines) try: beam_energy = self._get_beam_energy() - except: + except BaseException: # Fall back to the high_value of the energy axis beam_energy = self.axes_manager.signal_axes[0].high_value lines = [] diff --git a/hyperspy/_signals/eels.py b/hyperspy/_signals/eels.py index 5384a49f7a..dabfbde0c7 100644 --- a/hyperspy/_signals/eels.py +++ b/hyperspy/_signals/eels.py @@ -863,7 +863,8 @@ def _are_microscope_parameters_missing(self, ignore_parameters=[]): missing_parameters = [] for item in must_exist: exists = self.metadata.has_item(item) - if exists is False and item.split('.')[-1] not in ignore_parameters: + if exists is False and item.split( + '.')[-1] not in ignore_parameters: missing_parameters.append(item) if missing_parameters: _logger.info("Missing parameters {}".format(missing_parameters)) @@ -1199,9 +1200,11 @@ def kramers_kronig_analysis(self, "thickness information, not both") elif n is not None: # normalize using the refractive index. - K = (Im / eaxis).sum(axis=axis.index_in_array) * axis.scale - K = (K / (np.pi / 2) / (1 - 1. / n ** 2)).reshape( - np.insert(K.shape, axis.index_in_array, 1)) + K = (Im / eaxis).sum(axis=axis.index_in_array, keepdims=True) \ + * axis.scale + K = (K / (np.pi / 2) / (1 - 1. 
/ n ** 2)) + # K = (K / (np.pi / 2) / (1 - 1. / n ** 2)).reshape( + # np.insert(K.shape, axis.index_in_array, 1)) # Calculate the thickness only if possible and required if zlp is not None and (full_output is True or iterations > 1): @@ -1279,9 +1282,11 @@ def kramers_kronig_analysis(self, self.tmp_parameters.filename + '_CDF_after_Kramers_Kronig_transform') if 'thickness' in output: - thickness = eps._get_navigation_signal( - data=te[self.axes_manager._get_data_slice( - [(axis.index_in_array, 0)])]) + # As above,prevent errors if the signal is a single spectrum + if len(te) != 1: + te = te[self.axes_manager._get_data_slice( + [(axis.index_in_array, 0)])] + thickness = eps._get_navigation_signal(data=te) thickness.metadata.General.title = ( self.metadata.General.title + ' thickness ' '(calculated using Kramers-Kronig analysis)') diff --git a/hyperspy/_signals/hologram_image.py b/hyperspy/_signals/hologram_image.py index 4a9be61bd9..9847e9e65e 100644 --- a/hyperspy/_signals/hologram_image.py +++ b/hyperspy/_signals/hologram_image.py @@ -21,6 +21,7 @@ import scipy.constants as constants import numpy as np from dask.array import Array as daArray +from pint import UnitRegistry, UndefinedUnitError from hyperspy._signals.signal2d import Signal2D from hyperspy.signal import BaseSignal @@ -28,6 +29,7 @@ from hyperspy._signals.lazy import LazySignal from hyperspy.misc.holography.reconstruct import ( reconstruct, estimate_sideband_position, estimate_sideband_size) +from hyperspy.misc.holography.tools import calculate_carrier_frequency, estimate_fringe_contrast_fourier _logger = logging.getLogger(__name__) @@ -37,6 +39,96 @@ def _first_nav_pixel_data(s): s.axes_manager.navigation_dimension] +def _parse_sb_position(s, reference, sb_position, sb, high_cf, parallel): + + if sb_position is None: + _logger.warning('Sideband position is not specified. 
The sideband ' + 'will be found automatically which may cause ' + 'wrong results.') + if reference is None: + sb_position = s.estimate_sideband_position( + sb=sb, high_cf=high_cf, parallel=parallel) + else: + sb_position = reference.estimate_sideband_position( + sb=sb, high_cf=high_cf, parallel=parallel) + + else: + if isinstance(sb_position, BaseSignal) and \ + not sb_position._signal_dimension == 1: + raise ValueError('sb_position dimension has to be 1.') + + if not isinstance(sb_position, Signal1D): + sb_position = Signal1D(sb_position) + if isinstance(sb_position.data, daArray): + sb_position = sb_position.as_lazy() + + if not sb_position.axes_manager.signal_size == 2: + raise ValueError('sb_position should to have signal size of 2.') + + if sb_position.axes_manager.navigation_size != s.axes_manager.navigation_size: + if sb_position.axes_manager.navigation_size: + raise ValueError('Sideband position dimensions do not match' + ' neither reference nor hologram dimensions.') + # sb_position navdim=0, therefore map function should not iterate: + else: + sb_position_temp = sb_position.data + else: + sb_position_temp = sb_position.deepcopy() + return sb_position, sb_position_temp + + +def _parse_sb_size(s, reference, sb_position, sb_size, parallel): + # Default value is 1/2 distance between sideband and central band + if sb_size is None: + if reference is None: + sb_size = s.estimate_sideband_size( + sb_position, parallel=parallel) + else: + sb_size = reference.estimate_sideband_size( + sb_position, parallel=parallel) + else: + if not isinstance(sb_size, BaseSignal): + if isinstance(sb_size, + (np.ndarray, daArray)) and sb_size.size > 1: + # transpose if np.array of multiple instances + sb_size = BaseSignal(sb_size).T + else: + sb_size = BaseSignal(sb_size) + if isinstance(sb_size.data, daArray): + sb_size = sb_size.as_lazy() + + if sb_size.axes_manager.navigation_size != s.axes_manager.navigation_size: + if sb_size.axes_manager.navigation_size: + raise ValueError('Sideband size dimensions do not match ' + 'neither reference nor hologram dimensions.') + # sb_position navdim=0, therefore map function should not iterate: + else: + sb_size_temp = np.float64(sb_size.data) + else: + sb_size_temp = sb_size.deepcopy() + return sb_size, sb_size_temp + + +def _estimate_fringe_contrast_statistical(holo): + """ + Estimates average fringe contrast of a hologram using statistical definition: + V = STD / MEAN. + + Parameters + ---------- + holo_data: ndarray + The data of the hologram. + + Returns + ------- + Fringe contrast as a float + """ + + axes = holo.axes_manager.signal_axes + + return holo.std(axes) / holo.mean(axes) + + class HologramImage(Signal2D): """Image subclass for holograms acquired via off-axis electron holography.""" @@ -86,6 +178,7 @@ def set_microscope_parameters(self, def estimate_sideband_position(self, ap_cb_radius=None, sb='lower', + high_cf=True, show_progressbar=False, parallel=None): """ @@ -97,8 +190,12 @@ def estimate_sideband_position(self, The aperture radius used to mask out the centerband. sb : str, optional Chooses which sideband is taken. 'lower' or 'upper' + high_cf : bool, optional + If False, the highest carrier frequency allowed for the sideband location is equal to + half of the Nyquist frequency (Default: True). show_progressbar : boolean - Shows progressbar while iterating over different slices of the signal (passes the parameter to map method). + Shows progressbar while iterating over different slices of the signal (passes the + parameter to map method). 
parallel : bool Estimate the positions in parallel @@ -123,6 +220,7 @@ def estimate_sideband_position(self, self.axes_manager.signal_axes[1].scale), central_band_mask_radius=ap_cb_radius, sb=sb, + high_cf=high_cf, show_progressbar=show_progressbar, inplace=False, parallel=parallel, @@ -181,6 +279,7 @@ def reconstruct_phase(self, sb_unit=None, sb='lower', sb_position=None, + high_cf=True, output_shape=None, plotting=False, show_progressbar=False, @@ -216,6 +315,9 @@ def reconstruct_phase(self, sb_position : tuple, :class:`~hyperspy.signals.Signal1D, None The sideband position (y, x), referred to the non-shifted FFT. If None, sideband is determined automatically from FFT. + high_cf : bool, optional + If False, the highest carrier frequency allowed for the sideband location is equal to + half of the Nyquist frequency (Default: True). output_shape: tuple, None Choose a new output shape. Default is the shape of the input hologram. The output shape should not be larger than the input @@ -285,70 +387,19 @@ def reconstruct_phase(self, ' holograms do not match') # Parsing sideband position: - if sb_position is None: - _logger.warning('Sideband position is not specified. The sideband ' - 'will be found automatically which may cause ' - 'wrong results.') - if reference is None: - sb_position = self.estimate_sideband_position( - sb=sb, parallel=parallel) - else: - sb_position = reference.estimate_sideband_position( - sb=sb, parallel=parallel) - - else: - if isinstance(sb_position, BaseSignal) and \ - not sb_position._signal_dimension == 1: - raise ValueError('sb_position dimension has to be 1') - - if not isinstance(sb_position, Signal1D): - sb_position = Signal1D(sb_position) - if isinstance(sb_position.data, daArray): - sb_position = sb_position.as_lazy() - - if not sb_position.axes_manager.signal_size == 2: - raise ValueError('sb_position should to have signal size of 2') - - if sb_position.axes_manager.navigation_size != self.axes_manager.navigation_size: - if sb_position.axes_manager.navigation_size: - raise ValueError('Sideband position dimensions do not match' - ' neither reference nor hologram dimensions.') - # sb_position navdim=0, therefore map function should not iterate: - else: - sb_position_temp = sb_position.data - else: - sb_position_temp = sb_position.deepcopy() - - # Parsing sideband size - - # Default value is 1/2 distance between sideband and central band - if sb_size is None: - if reference is None: - sb_size = self.estimate_sideband_size( - sb_position, parallel=parallel) - else: - sb_size = reference.estimate_sideband_size( - sb_position, parallel=parallel) - else: - if not isinstance(sb_size, BaseSignal): - if isinstance(sb_size, - (np.ndarray, daArray)) and sb_size.size > 1: - # transpose if np.array of multiple instances - sb_size = BaseSignal(sb_size).T - else: - sb_size = BaseSignal(sb_size) - if isinstance(sb_size.data, daArray): - sb_size = sb_size.as_lazy() - - if sb_size.axes_manager.navigation_size != self.axes_manager.navigation_size: - if sb_size.axes_manager.navigation_size: - raise ValueError('Sideband size dimensions do not match ' - 'neither reference nor hologram dimensions.') - # sb_position navdim=0, therefore map function should not iterate: - else: - sb_size_temp = np.float64(sb_size.data) - else: - sb_size_temp = sb_size.deepcopy() + (sb_position, sb_position_temp) = _parse_sb_position(self, + reference, + sb_position, + sb, + high_cf, + parallel) + + # Parsing sideband size: + (sb_size, sb_size_temp) = _parse_sb_size(self, + reference, + sb_position, + 
sb_size, + parallel) # Standard edge smoothness of sideband aperture 5% of sb_size if sb_smoothness is None: @@ -397,7 +448,7 @@ def reconstruct_phase(self, ) try: ht = self.metadata.Acquisition_instrument.TEM.beam_energy - except: + except BaseException: raise AttributeError("Please define the beam energy." "You can do this e.g. by using the " "set_microscope_parameters method") @@ -533,6 +584,185 @@ def reconstruct_phase(self, return wave_image + def statistics(self, + sb_position=None, + sb='lower', + high_cf=False, + fringe_contrast_algorithm='statistical', + apodization='hanning', + single_values=True, + show_progressbar=False, + parallel=None): + """ + Calculates following statistics for off-axis electron holograms: + + 1. Fringe contrast using either statistical definition or + Fourier space approach (see description of `fringe_contrast_algorithm` parameter) + 2. Fringe sampling (in pixels) + 3. Fringe spacing (in calibrated units) + 4. Carrier frequency (in calibrated units, radians and 1/px) + + Parameters + ---------- + sb_position : tuple, :class:`~hyperspy.signals.Signal1D, None + The sideband position (y, x), referred to the non-shifted FFT. + It has to be tuple or to have the same dimensionality as the hologram. + If None, sideband is determined automatically from FFT. + sb : str, None + Select which sideband is selected. 'upper', 'lower', 'left' or 'right'. + high_cf : bool, optional + If False, the highest carrier frequency allowed for the sideband location is equal to + half of the Nyquist frequency (Default: False). + fringe_contrast_algorithm : str + Select fringe contrast algorithm between: + + 'fourier' + fringe contrast is estimated as: + 2 * / , + where I(k_0) is intensity of sideband and I(0) is the intensity of central band (FFT origin). + This method delivers also reasonable estimation if + interference pattern do not cover full field of view. + 'statistical' + fringe contrast is estimated by dividing standard deviation by mean + of the hologram intensity in real space. This algorithm relays on that the fringes are regular and + covering entire field of view. + + (Default: 'statistical') + apodization: str or None, optional + Used with `fringe_contrast_algorithm='fourier'`. If 'hanning' or 'hamming' apodization window + will be applied in real space before FFT for estimation of fringe contrast. + Apodization is typically needed to suppress striking due to sharp edges of the image, + which often results in underestimation of the fringe contrast. (Default: 'hanning') + single_values : bool, optional + If True calculates statistics only for the first navigation pixels and + returns the values as single floats (Default: True) + show_progressbar : bool, optional + Shows progressbar while iterating over different slices of the + signal (passes the parameter to map method). 
(Default: False) + parallel : bool, None, optional + Run the reconstruction in parallel + + Returns + ------- + statistics_dict : + Dictionary with the statistics + + Examples + -------- + >>> import hyperspy.api as hs + >>> s = hs.datasets.example_signals.reference_hologram() + >>> sb_position = s.estimate_sideband_position(high_cf=True) + >>> s.statistics(sb_position=sb_position) + {'Fringe spacing (nm)': 3.4860442674236256, + 'Carrier frequency (1/px)': 0.26383819985575441, + 'Carrier frequency (mrad)': 0.56475154609203482, + 'Fringe contrast': 0.071298357213623778, + 'Fringe sampling (px)': 3.7902017241882331, + 'Carrier frequency (1 / nm)': 0.28685808994016415} + """ + + # Testing match of navigation axes of reference and self + # (exception: reference nav_dim=1): + + # Parsing sideband position: + (sb_position, sb_position_temp) = _parse_sb_position( + self, None, sb_position, sb, high_cf, parallel) + + # Calculate carrier frequency in 1/px and fringe sampling: + fourier_sampling = 1. / np.array(self.axes_manager.signal_shape) + if single_values: + carrier_freq_px = calculate_carrier_frequency(_first_nav_pixel_data(self), + sb_position=_first_nav_pixel_data( + sb_position), + scale=fourier_sampling) + else: + carrier_freq_px = self.map(calculate_carrier_frequency, + sb_position=sb_position, + scale=fourier_sampling, + inplace=False, + ragged=False, + show_progressbar=show_progressbar, + parallel=parallel) + fringe_sampling = np.divide(1., carrier_freq_px) + + ureg = UnitRegistry() + try: + units = ureg.parse_expression( + str(self.axes_manager.signal_axes[0].units)) + except UndefinedUnitError: + raise ValueError('Signal axes units should be defined.') + + # Calculate carrier frequency in 1/units and fringe spacing in units: + f_sampling_units = np.divide( + 1., + [a * b for a, b in + zip(self.axes_manager.signal_shape, + (self.axes_manager.signal_axes[0].scale, + self.axes_manager.signal_axes[1].scale))] + ) + if single_values: + carrier_freq_units = calculate_carrier_frequency(_first_nav_pixel_data(self), + sb_position=_first_nav_pixel_data( + sb_position), + scale=f_sampling_units) + else: + carrier_freq_units = self.map(calculate_carrier_frequency, + sb_position=sb_position, + scale=f_sampling_units, + inplace=False, + ragged=False, + show_progressbar=show_progressbar, + parallel=parallel) + fringe_spacing = np.divide(1., carrier_freq_units) + + # Calculate carrier frequency in mrad: + try: + ht = self.metadata.Acquisition_instrument.TEM.beam_energy + except BaseException: + raise AttributeError("Please define the beam energy." + "You can do this e.g. 
by using the " + "set_microscope_parameters method.") + + momentum = 2 * constants.m_e * constants.elementary_charge * ht * \ + 1000 * (1 + constants.elementary_charge * ht * + 1000 / (2 * constants.m_e * constants.c ** 2)) + wavelength = constants.h / np.sqrt(momentum) * 1e9 # in nm + carrier_freq_quantity = wavelength * \ + ureg('nm') * carrier_freq_units / units * ureg('rad') + carrier_freq_mrad = carrier_freq_quantity.to('mrad').magnitude + + # Calculate fringe contrast: + if fringe_contrast_algorithm == 'fourier': + if single_values: + fringe_contrast = estimate_fringe_contrast_fourier(_first_nav_pixel_data(self), + sb_position=_first_nav_pixel_data( + sb_position), + apodization=apodization) + else: + fringe_contrast = self.map(estimate_fringe_contrast_fourier, + sb_position=sb_position, + apodization=apodization, + inplace=False, + ragged=False, + show_progressbar=show_progressbar, + parallel=parallel) + elif fringe_contrast_algorithm == 'statistical': + if single_values: + fringe_contrast = _first_nav_pixel_data( + self).std() / _first_nav_pixel_data(self).mean() + else: + fringe_contrast = _estimate_fringe_contrast_statistical(self) + else: + raise ValueError( + "fringe_contrast_algorithm can only be set to fourier or statistical.") + + return {'Fringe contrast': fringe_contrast, + 'Fringe sampling (px)': fringe_sampling, + 'Fringe spacing ({:~})'.format(units.units): fringe_spacing, + 'Carrier frequency (1/px)': carrier_freq_px, + 'Carrier frequency ({:~})'.format((1. / units).units): carrier_freq_units, + 'Carrier frequency (mrad)': carrier_freq_mrad} + class LazyHologramImage(LazySignal, HologramImage): diff --git a/hyperspy/_signals/lazy.py b/hyperspy/_signals/lazy.py index 850a3e51cc..8fc5589509 100644 --- a/hyperspy/_signals/lazy.py +++ b/hyperspy/_signals/lazy.py @@ -83,17 +83,46 @@ class LazySignal(BaseSignal): """ _lazy = True - def compute(self, progressbar=True): - """Attempt to store the full signal in memory..""" + def compute(self, progressbar=True, close_file=False): + """Attempt to store the full signal in memory. + + close_file: bool + If True, attemp to close the file associated with the dask + array data if any. Note that closing the file will make all other + associated lazy signals inoperative. + + """ if progressbar: cm = ProgressBar else: cm = dummy_context_manager with cm(): - self.data = self.data.compute() + da = self.data + data = da.compute() + if close_file: + self.close_file() + self.data = data self._lazy = False self._assign_subclass() + def close_file(self): + """Closes the associated data file if any. + + Currently it only supports closing the file associated with a dask + array created from an h5py DataSet (default HyperSpy hdf5 reader). + + """ + arrkey = None + for key in self.data.dask.keys(): + if "array-original" in key: + arrkey = key + break + if arrkey: + try: + self.data.dask[arrkey].file.close() + except AttributeError as e: + _logger.exception("Failed to close lazy Signal file") + def _get_dask_chunks(self, axis=None, dtype=None): """Returns dask chunks Aims: diff --git a/hyperspy/_signals/signal1d.py b/hyperspy/_signals/signal1d.py index 5ed912ad77..fdb0dce100 100644 --- a/hyperspy/_signals/signal1d.py +++ b/hyperspy/_signals/signal1d.py @@ -62,44 +62,53 @@ def find_peaks_ohaver(y, x=None, slope_thresh=0., amp_thresh=None, medfilt_radius=5, maxpeakn=30000, peakgroup=10, subchannel=True,): """Find peaks along a 1D line. + Function to locate the positive peaks in a noisy x-y data set. 
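The new `close_file` option of `LazySignal.compute` (and the companion `close_file` method) introduced above is easiest to see in a short usage sketch; the file name is hypothetical and this assumes the signal was loaded lazily from an HDF5 file:

```python
>>> import hyperspy.api as hs
>>> s = hs.load("large_stack.hdf5", lazy=True)  # hypothetical HDF5 file, loaded lazily
>>> s.compute(close_file=True)  # bring the data into memory, then close the HDF5 handle
>>> # or, to release the file while keeping the signal lazy (other lazy signals
>>> # backed by the same file become inoperative):
>>> # s.close_file()
```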
Detects peaks by looking for downward zero-crossings in the first derivative that exceed 'slope_thresh'. Returns an array containing position, height, and width of each peak. Sorted by position. - 'slope_thresh' and 'amp_thresh', control sensitivity: higher values will - neglect smaller features. + 'slope_thresh' and 'amp_thresh', control sensitivity: higher values + will neglect wider peaks (slope) and smaller features (amp), + respectively. + Parameters - --------- + ---------- + y : array 1D input array, e.g. a spectrum x : array (optional) 1D array describing the calibration of y (must have same shape as y) slope_thresh : float (optional) - 1st derivative threshold to count the peak - default is set to 0.5 - higher values will neglect smaller features. + 1st derivative threshold to count the peak; + higher values will neglect broader features; + default is set to 0. amp_thresh : float (optional) - intensity threshold above which - default is set to 10% of max(y) - higher values will neglect smaller features. + intensity threshold below which peaks are ignored; + higher values will neglect smaller features; + default is set to 10% of max(y). medfilt_radius : int (optional) median filter window to apply to smooth the data - (see scipy.signal.medfilt) - if 0, no filter will be applied. - default is set to 5 + (see scipy.signal.medfilt); + if 0, no filter will be applied; + default is set to 5. peakgroup : int (optional) - number of points around the "top part" of the peak - default is set to 10 + number of points around the "top part" of the peak that + are taken to estimate the peak height; for spikes or + very narrow peaks, keep PeakGroup=1 or 2; for broad or + noisy peaks, make PeakGroup larger to reduce the effect + of noise; + default is set to 10. maxpeakn : int (optional) - number of maximum detectable peaks - default is set to 30000 + number of maximum detectable peaks; + default is set to 30000. subchannel : bool (optional) - default is set to True + default is set to True. Returns ------- - P : structured array of shape (npeaks) and fields: position, width, height - contains position, height, and width of each peak + P : structured array of shape (npeaks) + contains fields: 'position', 'width', and 'height' for each peak. 
+ Examples -------- >>> x = np.arange(0,50,0.01) @@ -1014,7 +1023,7 @@ def filter_butterworth(self, def _remove_background_cli( self, signal_range, background_estimator, fast=True, - show_progressbar=None): + zero_fill=False, show_progressbar=None): signal_range = signal_range_from_roi(signal_range) from hyperspy.models.model1d import Model1D model = Model1D(self) @@ -1028,7 +1037,17 @@ def _remove_background_cli( model.set_signal_range(signal_range[0], signal_range[1]) model.multifit(show_progressbar=show_progressbar) model.reset_signal_range() - return self - model.as_signal(show_progressbar=show_progressbar) + result = self - model.as_signal(show_progressbar=show_progressbar) + + if zero_fill: + if self._lazy: + low_idx = result.axes_manager[-1].value2index(signal_range[0]) + z = da.zeros(low_idx, chunks=(low_idx,)) + cropped_da = result.data[low_idx:] + result.data = da.concatenate([z, cropped_da]) + else: + result.isig[:signal_range[0]] = 0 + return result def remove_background( self, @@ -1036,6 +1055,8 @@ def remove_background( background_type='Power Law', polynomial_order=2, fast=True, + zero_fill=False, + plot_remainder=True, show_progressbar=None, display=True, toolkit=None): signal_range = signal_range_from_roi(signal_range) self._check_signal_dimension_equals_one() @@ -1043,7 +1064,9 @@ def remove_background( br = BackgroundRemoval(self, background_type=background_type, polynomial_order=polynomial_order, fast=fast, - show_progressbar=show_progressbar) + plot_remainder=plot_remainder, + show_progressbar=show_progressbar, + zero_fill=zero_fill) return br.gui(display=display, toolkit=toolkit) else: if background_type in ('PowerLaw', 'Power Law'): @@ -1064,6 +1087,7 @@ def remove_background( signal_range=signal_range, background_estimator=background_estimator, fast=fast, + zero_fill=zero_fill, show_progressbar=show_progressbar) return spectra remove_background.__doc__ = \ @@ -1087,6 +1111,17 @@ def remove_background( If False, the signal is fitted using non-linear least squares afterwards.This is slower compared to the estimation but possibly more accurate. + zero_fill : bool + If True, all spectral channels lower than the lower bound of the + fitting range will be set to zero (this is the default behavior + of Gatan's DigitalMicrograph). Setting this value to False + allows for inspection of the quality of background fit throughout + the pre-fitting region. + plot_remainder : bool + If True, add a (green) line previewing the remainder signal after + background removal. This preview is obtained from a Fast calculation + so the result may be different if a NLLS calculation is finally + performed. show_progressbar : None or bool If True, display a progress bar. If None the default is set in `preferences`. @@ -1254,53 +1289,53 @@ def find_peaks1D_ohaver(self, xdim=None, slope_thresh=0, amp_thresh=None, peak. 'slope_thresh' and 'amp_thresh', control sensitivity: higher - values will - neglect smaller features. - + values will neglect broad peaks (slope) and smaller features (amp), + respectively. - peakgroup is the number of points around the top peak to search - around + peakgroup is the number of points around the top of the peak + that are taken to estimate the peak height. For spikes or very + narrow peaks, keep PeakGroup=1 or 2; for broad or noisy peaks, + make PeakGroup larger to reduce the effect of noise. 
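A minimal sketch of the new `zero_fill` option of `remove_background`, here applied to the artificial core-loss signal added later in this patch; the fitting range is illustrative only:

```python
>>> import hyperspy.api as hs
>>> s = hs.datasets.artificial_data.get_core_loss_eels_signal(add_powerlaw=True)
>>> s_nobg = s.remove_background(signal_range=(500., 640.),
...                              background_type='Power Law',
...                              zero_fill=True)
>>> # channels below 500 eV are set to zero, mimicking DigitalMicrograph's behaviour
```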
Parameters - --------- - + ---------- slope_thresh : float (optional) - 1st derivative threshold to count the peak - default is set to 0.5 - higher values will neglect smaller features. + 1st derivative threshold to count the peak; + higher values will neglect broader features; + default is set to 0. amp_thresh : float (optional) - intensity threshold above which - default is set to 10% of max(y) - higher values will neglect smaller features. + intensity threshold below which peaks are ignored; + higher values will neglect smaller features; + default is set to 10% of max(y). medfilt_radius : int (optional) median filter window to apply to smooth the data - (see scipy.signal.medfilt) - if 0, no filter will be applied. - default is set to 5 + (see scipy.signal.medfilt); + if 0, no filter will be applied; + default is set to 5. peakgroup : int (optional) number of points around the "top part" of the peak + that are taken to estimate the peak height; default is set to 10 maxpeakn : int (optional) - number of maximum detectable peaks - default is set to 5000 + number of maximum detectable peaks; + default is set to 5000. - subpix : bool (optional) - default is set to True + subchannel : bool (optional) + default is set to True. parallel : {None, bool} Perform the operation in a threaded (parallel) manner. Returns ------- - peaks : structured array of shape _navigation_shape_in_array in which - each cell contains an array that contains as many structured arrays as - peaks where found at that location and which fields: position, height, - width, contains position, height, and width of each peak. + peaks : structured array of shape (npeaks) + contains fields: 'position', 'width', and 'height' for each peak. + Raises ------ diff --git a/hyperspy/_signals/signal2d.py b/hyperspy/_signals/signal2d.py index 4ee04d1391..f9200c5bc4 100644 --- a/hyperspy/_signals/signal2d.py +++ b/hyperspy/_signals/signal2d.py @@ -23,6 +23,7 @@ import scipy as sp import logging from scipy.fftpack import fftn, ifftn +from skimage.feature.register_translation import _upsampled_dft from hyperspy.defaults_parser import preferences from hyperspy.external.progressbar import progressbar @@ -96,19 +97,18 @@ def fft_correlation(in1, in2, normalize=False): size = s1 + s2 - 1 # Use 2**n-sized FFT fsize = (2 ** np.ceil(np.log2(size))).astype("int") - IN1 = fftn(in1, fsize) - IN1 *= fftn(in2, fsize).conjugate() + fprod = fftn(in1, fsize) + fprod *= fftn(in2, fsize).conjugate() if normalize is True: - ret = ifftn(np.nan_to_num(IN1 / np.absolute(IN1))).real.copy() - else: - ret = ifftn(IN1).real.copy() - del IN1 - return ret + fprod = np.nan_to_num(fprod / np.absolute(fprod)) + ret = ifftn(fprod).real.copy() + return ret, fprod def estimate_image_shift(ref, image, roi=None, sobel=True, medfilter=True, hanning=True, plot=False, dtype='float', normalize_corr=False, + sub_pixel_factor=1, return_maxval=True): """Estimate the shift in a image using phase correlation @@ -121,6 +121,10 @@ def estimate_image_shift(ref, image, roi=None, sobel=True, Parameters ---------- + ref : 2D numpy.ndarray + Reference image + image : 2D numpy.ndarray + Image to register roi : tuple of ints (top, bottom, left, right) Define the region of interest sobel : bool @@ -133,16 +137,18 @@ def estimate_image_shift(ref, image, roi=None, sobel=True, If True, plots the images after applying the filters and the phase correlation. If a figure instance, the images will be plotted to the given figure. 
- reference : \'current\' | \'cascade\' - If \'current\' (default) the image at the current - coordinates is taken as reference. If \'cascade\' each image + reference : 'current' | 'cascade' + If 'current' (default) the image at the current + coordinates is taken as reference. If 'cascade' each image is aligned with the previous one. dtype : str or dtype Typecode or data-type in which the calculations must be performed. - normalize_corr : bool If True use phase correlation instead of standard correlation + sub_pixel_factor : float + Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor parts + of a pixel. Default is 1, i.e. no sub-pixel accuracy. Returns ------- @@ -153,6 +159,7 @@ def estimate_image_shift(ref, image, roi=None, sobel=True, The maximum value of the correlation """ + ref, image = da.compute(ref, image) # Make a copy of the images to avoid modifying them ref = ref.copy().astype(dtype) @@ -174,9 +181,8 @@ def estimate_image_shift(ref, image, roi=None, sobel=True, im[:] = sp.signal.medfilt(im) if sobel is True: im[:] = sobel_filter(im) - - phase_correlation = fft_correlation(ref, image, - normalize=normalize_corr) + phase_correlation, image_product = fft_correlation( + ref, image, normalize=normalize_corr) # Estimate the shift by getting the coordinates of the maximum argmax = np.unravel_index(np.argmax(phase_correlation), @@ -187,19 +193,46 @@ def estimate_image_shift(ref, image, roi=None, sobel=True, argmax[0] - phase_correlation.shape[0] shift1 = argmax[1] if argmax[1] < threshold[1] else \ argmax[1] - phase_correlation.shape[1] - max_val = phase_correlation.max() + max_val = phase_correlation.real.max() + shifts = np.array((shift0, shift1)) + + # The following code is more or less copied from + # skimage.feature.register_feature, to gain access to the maximum value: + if sub_pixel_factor != 1: + # Initial shift estimate in upsampled grid + shifts = np.round(shifts * sub_pixel_factor) / sub_pixel_factor + upsampled_region_size = np.ceil(sub_pixel_factor * 1.5) + # Center of output array at dftshift + 1 + dftshift = np.fix(upsampled_region_size / 2.0) + sub_pixel_factor = np.array(sub_pixel_factor, dtype=np.float64) + normalization = (image_product.size * sub_pixel_factor ** 2) + # Matrix multiply DFT around the current shift estimate + sample_region_offset = dftshift - shifts * sub_pixel_factor + correlation = _upsampled_dft(image_product.conj(), + upsampled_region_size, + sub_pixel_factor, + sample_region_offset).conj() + correlation /= normalization + # Locate maximum and map back to original pixel grid + maxima = np.array(np.unravel_index( + np.argmax(np.abs(correlation)), + correlation.shape), + dtype=np.float64) + maxima -= dftshift + shifts = shifts + maxima / sub_pixel_factor + max_val = correlation.real.max() # Plot on demand if plot is True or isinstance(plot, plt.Figure): if isinstance(plot, plt.Figure): - f = plot + fig = plot axarr = plot.axes if len(axarr) < 3: for i in range(3): - f.add_subplot(1, 3, i) - axarr = plot.axes + fig.add_subplot(1, 3, i + 1) + axarr = fig.axes else: - f, axarr = plt.subplots(1, 3) + fig, axarr = plt.subplots(1, 3) full_plot = len(axarr[0].images) == 0 if full_plot: axarr[0].set_title('Reference') @@ -217,15 +250,15 @@ def estimate_image_shift(ref, image, roi=None, sobel=True, axarr[1].images[0].set_data(image) axarr[2].images[0].set_data(np.fft.fftshift(phase_correlation)) # TODO: Renormalize images - f.canvas.draw_idle() + fig.canvas.draw_idle() # Liberate the memory. 
It is specially necessary if it is a # memory map del ref del image if return_maxval: - return -np.array((shift0, shift1)), max_val + return -shifts, max_val else: - return -np.array((shift0, shift1)) + return -shifts class Signal2D(BaseSignal, CommonSignal2D): @@ -245,6 +278,7 @@ def plot(self, scalebar=True, scalebar_color="white", axes_ticks=None, + axes_off=False, saturated_pixels=0, vmin=None, vmax=None, @@ -262,6 +296,7 @@ def plot(self, scalebar=scalebar, scalebar_color=scalebar_color, axes_ticks=axes_ticks, + axes_off=axes_off, saturated_pixels=saturated_pixels, vmin=vmin, vmax=vmax, @@ -273,14 +308,17 @@ def plot(self, def create_model(self, dictionary=None): """Create a model for the current signal + Parameters - __________ + ---------- dictionary : {None, dict}, optional - A dictionary to be used to recreate a model. Usually generated using - :meth:`hyperspy.model.as_dictionary` + A dictionary to be used to recreate a model. Usually generated + using :meth:`hyperspy.model.as_dictionary` + Returns ------- A Model class + """ from hyperspy.models.model2d import Model2D return Model2D(self, dictionary=dictionary) @@ -296,15 +334,19 @@ def estimate_shift2D(self, hanning=True, plot=False, dtype='float', - show_progressbar=None): + show_progressbar=None, + sub_pixel_factor=1): """Estimate the shifts in a image using phase correlation + This method can only estimate the shift by comparing bidimensional features that should not change position between frames. To decrease the memory usage, the time of computation and the accuracy of the results it is convenient to select a region of interest by setting the roi keyword. + Parameters ---------- + reference : {'current', 'cascade' ,'stat'} If 'current' (default) the image at the current coordinates is taken as reference. If 'cascade' each image @@ -342,20 +384,30 @@ def estimate_shift2D(self, show_progressbar : None or bool If True, display a progress bar. If None the default is set in `preferences`. + sub_pixel_factor : float + Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor + parts of a pixel. Default is 1, i.e. no sub-pixel accuracy. + Returns ------- + list of applied shifts + Notes ----- + The statistical analysis approach to the translation estimation when using `reference`='stat' roughly follows [1]_ . If you use it please cite their article. + References ---------- + .. [1] Schaffer, Bernhard, Werner Grogger, and Gerald Kothleitner. “Automated Spatial Drift Correction for EFTEM - Signal2D Series.” + Image Series.” Ultramicroscopy 102, no. 1 (December 2004): 27–36. 
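The effect of the new `sub_pixel_factor` argument is easiest to show with a short sketch; the file name is hypothetical, and any `Signal2D` stack of drifted images would do:

```python
>>> import hyperspy.api as hs
>>> stack = hs.load("drifted_image_series.hdf5")  # hypothetical image stack
>>> shifts = stack.estimate_shift2D(reference='current', sub_pixel_factor=10)
>>> stack.align2D(shifts=shifts, crop=True)  # apply the sub-pixel shifts in place
```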
+ """ if show_progressbar is None: show_progressbar = preferences.General.show_progressbar @@ -392,7 +444,8 @@ def estimate_shift2D(self, hanning=hanning, normalize_corr=normalize_corr, plot=plot, - dtype=dtype) + dtype=dtype, + sub_pixel_factor=sub_pixel_factor) np.fill_diagonal(pcarray['max_value'], max_value) pbar_max = nrows * images_number else: @@ -411,7 +464,8 @@ def estimate_shift2D(self, nshift, max_val = estimate_image_shift( ref, im, roi=roi, sobel=sobel, medfilter=medfilter, hanning=hanning, plot=plot, - normalize_corr=normalize_corr, dtype=dtype) + normalize_corr=normalize_corr, dtype=dtype, + sub_pixel_factor=sub_pixel_factor) if reference == 'cascade': shift += nshift ref = im.copy() @@ -435,8 +489,8 @@ def estimate_shift2D(self, hanning=hanning, normalize_corr=normalize_corr, plot=plot, - dtype=dtype) - + dtype=dtype, + sub_pixel_factor=sub_pixel_factor) pcarray[i1, i2] = max_value, nshift del im2 pbar.update(1) @@ -479,13 +533,16 @@ def align2D(self, crop=True, fill_value=np.nan, shifts=None, expand=False, correlation_threshold=None, chunk_size=30, interpolation_order=1, + sub_pixel_factor=1, show_progressbar=None, parallel=None): """Align the images in place using user provided shifts or by estimating the shifts. + Please, see `estimate_shift2D` docstring for details on the rest of the parameters not documented in the following section + Parameters ---------- crop : bool @@ -504,21 +561,27 @@ def align2D(self, crop=True, fill_value=np.nan, shifts=None, expand=False, The order of the spline interpolation. Default is 1, linear interpolation. parallel : {None, bool} + Returns ------- shifts : np.array The shifts are returned only if `shifts` is None + Notes ----- + The statistical analysis approach to the translation estimation when using `reference`='stat' roughly follows [1]_ . If you use it please cite their article. + References ---------- + .. [1] Schaffer, Bernhard, Werner Grogger, and Gerald Kothleitner. “Automated Spatial Drift Correction for EFTEM - Signal2D Series.” + Image Series.” Ultramicroscopy 102, no. 1 (December 2004): 27–36. + """ self._check_signal_dimension_equals_two() if show_progressbar is None: @@ -535,6 +598,7 @@ def align2D(self, crop=True, fill_value=np.nan, shifts=None, expand=False, correlation_threshold=correlation_threshold, normalize_corr=normalize_corr, chunk_size=chunk_size, + sub_pixel_factor=sub_pixel_factor, show_progressbar=show_progressbar) return_shifts = True else: @@ -600,13 +664,18 @@ def align2D(self, crop=True, fill_value=np.nan, shifts=None, expand=False, return shifts def crop_image(self, top=None, bottom=None, - left=None, right=None): + left=None, right=None, convert_units=False): """Crops an image in place. - top, bottom, left, right : int or float - + Parameters + ---------- + top, bottom, left, right : {int | float} If int the values are taken as indices. If float the values are converted to indices. + convert_units : bool + Default is False + If True, convert the signal units using the 'convert_to_units' + method of the 'axes_manager'. If False, does nothing. See also: --------- @@ -620,6 +689,8 @@ def crop_image(self, top=None, bottom=None, self.crop(self.axes_manager.signal_axes[0].index_in_axes_manager, left, right) + if convert_units: + self.axes_manager.convert_units('signal') def add_ramp(self, ramp_x, ramp_y, offset=0): """Add a linear ramp to the signal. @@ -632,11 +703,12 @@ def add_ramp(self, ramp_x, ramp_y, offset=0): Slope of the ramp in y-direction. 
offset: float, optional Offset of the ramp at the signal fulcrum. + Notes ----- - The fulcrum of the linear ramp is at the origin and the slopes are given in units of - the axis with the according scale taken into account. Both are available via the - `axes_manager` of the signal. + The fulcrum of the linear ramp is at the origin and the slopes are + given in units of the axis with the according scale taken into + account. Both are available via the `axes_manager` of the signal. """ yy, xx = np.indices(self.axes_manager._signal_shape_in_array) diff --git a/hyperspy/api.py b/hyperspy/api.py index 8dfc6e3bc1..8e49040057 100644 --- a/hyperspy/api.py +++ b/hyperspy/api.py @@ -21,4 +21,4 @@ if preferences.GUIs.warn_if_guis_are_missing: _logger.warning( "The traitsui GUI elements are not available, probably because the " - "hyperspy_gui_traitui package is not installed.") + "hyperspy_gui_traitsui package is not installed.") diff --git a/hyperspy/axes.py b/hyperspy/axes.py index 37738dea02..61bd8c6a8c 100644 --- a/hyperspy/axes.py +++ b/hyperspy/axes.py @@ -23,14 +23,28 @@ import dask.array as da import traits.api as t from traits.trait_errors import TraitError +import pint +import logging from hyperspy.events import Events, Event from hyperspy.misc.utils import isiterable, ordinal from hyperspy.misc.math_tools import isfloat -from hyperspy.ui_registry import add_gui_method, get_gui, DISPLAY_DT, TOOLKIT_DT +from hyperspy.ui_registry import add_gui_method, get_gui +from hyperspy.defaults_parser import preferences + import warnings +_logger = logging.getLogger(__name__) +_ureg = pint.UnitRegistry() + + +FACTOR_DOCSTRING = \ + """factor : float (default: 0.25) + 'factor' is an adjustable value used to determine the prefix of + the units. The product `factor * scale * size` is passed to the + pint `to_compact` method to determine the prefix.""" + class ndindex_nat(np.ndindex): @@ -56,13 +70,140 @@ def generate_axis(offset, scale, size, offset_index=0): Numpy array """ + return np.linspace(offset - offset_index * scale, offset + scale * (size - 1 - offset_index), size) +class UnitConversion: + + def __init__(self, units=t.Undefined, scale=1.0, offset=0.0): + self.units = units + self.scale = scale + self.offset = units + + def _ignore_conversion(self, units): + if units == t.Undefined: + return True + try: + _ureg(units) + except pint.errors.UndefinedUnitError: + warnings.warn('Unit "{}" not supported for conversion. Nothing ' + 'done.'.format(units), + UserWarning) + return True + return False + + def _convert_compact_units(self, factor=0.25, inplace=True): + """ Convert units to "human-readable" units, which means with a + convenient prefix. 
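For context, a minimal sketch of the per-axis conversion that this mixin enables (values chosen purely for illustration):

```python
>>> import numpy as np
>>> import hyperspy.api as hs
>>> s = hs.signals.Signal1D(np.arange(10))
>>> s.axes_manager[0].units = 'm'
>>> s.axes_manager[0].scale = 1.5e-9
>>> s.axes_manager[0].convert_to_units('nm')  # scale becomes ~1.5, units 'nm'
>>> s.axes_manager[0].convert_to_units()      # or let pint pick a compact prefix
```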
+ + Parameters + ---------- + %s + """ + if self._ignore_conversion(self.units): + return + scale = self.scale * _ureg(self.units) + scale_size = factor * scale * self.size + converted_units = '{:~}'.format(scale_size.to_compact().units) + return self._convert_units(converted_units, inplace=inplace) + + _convert_compact_units.__doc__ %= FACTOR_DOCSTRING + + def _get_index_from_value_with_units(self, value): + value = _ureg.parse_expression(value) + if not hasattr(value, 'units'): + raise ValueError('"{}" should contains an units.'.format(value)) + return self.value2index(value.to(self.units).magnitude) + + def _convert_units(self, converted_units, inplace=True): + if self._ignore_conversion(converted_units) or \ + self._ignore_conversion(self.units): + return + scale_pint = self.scale * _ureg(self.units) + offset_pint = self.offset * _ureg(self.units) + scale = float(scale_pint.to(_ureg(converted_units)).magnitude) + offset = float(offset_pint.to(_ureg(converted_units)).magnitude) + units = '{:~}'.format(scale_pint.to(_ureg(converted_units)).units) + if inplace: + self.scale = scale + self.offset = offset + self.units = units + else: + return scale, offset, units + + def convert_to_units(self, units=None, inplace=True, factor=0.25): + """ Convert the scale and the units of the current axis. If the unit + of measure is not supported by the pint library, the scale and units + are not modified. + + Parameters + ---------- + units : {str | None} + Default = None + If str, the axis will be converted to the provided units. + If `"auto"`, automatically determine the optimal units to avoid + using too large or too small numbers. This can be tweaked by the + `factor` argument. + inplace : bool + If `True`, convert the axis in place. if `False` return the + `scale`, `offset` and `units`. + %s + """ + if units is None: + out = self._convert_compact_units(factor, inplace=inplace) + else: + out = self._convert_units(units, inplace=inplace) + return out + + convert_to_units.__doc__ %= FACTOR_DOCSTRING + + def _get_quantity(self, attribute='scale'): + if attribute == 'scale' or attribute == 'offset': + units = self.units + if units == t.Undefined: + units = '' + return self.__dict__[attribute] * _ureg(units) + else: + raise ValueError('`attribute` argument can only take the `scale` ' + 'or the `offset` value.') + + def _set_quantity(self, value, attribute='scale'): + if attribute == 'scale' or attribute == 'offset': + units = '' if self.units == t.Undefined else self.units + if isinstance(value, str): + value = _ureg.parse_expression(value) + if isinstance(value, float): + value = value * _ureg(units) + + # to be consistent, we also need to convert the other one + # (scale or offset) when both units differ. 
+ if value.units != units and value.units != '' and units != '': + other = 'offset' if attribute == 'scale' else 'scale' + other_quantity = self._get_quantity(other).to(value.units) + self.__dict__[other] = float(other_quantity.magnitude) + + self.units = '{:~}'.format(value.units) + self.__dict__[attribute] = float(value.magnitude) + else: + raise ValueError('`attribute` argument can only take the `scale` ' + 'or the `offset` value.') + + @property + def units(self): + return self._units + + @units.setter + def units(self, s): + if s == '': + self._units = t.Undefined + self._units = s + + @add_gui_method(toolkey="DataAxis") -class DataAxis(t.HasTraits): +class DataAxis(t.HasTraits, UnitConversion): name = t.Str() units = t.Str() scale = t.Float() @@ -87,7 +228,7 @@ def __init__(self, offset=0., units=t.Undefined, navigate=t.Undefined): - super(DataAxis, self).__init__() + super().__init__() self.events = Events() self.events.index_changed = Event(""" Event that triggers when the index of the `DataAxis` changes @@ -200,6 +341,11 @@ def _get_index(self, value): else: return value + def _parse_string_for_slice(self, value): + if isinstance(value, str): + value = self._get_index_from_value_with_units(value) + return value + def _get_array_slices(self, slice_): """Returns a slice to slice the corresponding data axis without changing the offset and scale of the DataAxis. @@ -227,6 +373,10 @@ def _get_array_slices(self, slice_): stop = start + 1 step = None + start = self._parse_string_for_slice(start) + stop = self._parse_string_for_slice(stop) + step = self._parse_string_for_slice(step) + if isfloat(step): step = int(round(step / self.scale)) if isfloat(start): @@ -461,6 +611,32 @@ def update_from(self, axis, attributes=["scale", "offset", "units"]): any_changes = True return any_changes + @property + def scale_as_quantity(self): + return self._get_quantity('scale') + + @scale_as_quantity.setter + def scale_as_quantity(self, value): + self._set_quantity(value, 'scale') + + @property + def offset_as_quantity(self): + return self._get_quantity('offset') + + @offset_as_quantity.setter + def offset_as_quantity(self, value): + self._set_quantity(value, 'offset') + + @property + def units(self): + return self._units + + @units.setter + def units(self, s): + if s == '': + self._units = t.Undefined + self._units = s + @add_gui_method(toolkey="AxesManager") class AxesManager(t.HasTraits): @@ -825,6 +1001,103 @@ def _on_scale_changed(self): def _on_offset_changed(self): self.events.any_axis_changed.trigger(obj=self) + def convert_units(self, axes=None, units=None, same_units=True, + factor=0.25): + """ Convert the scale and the units of the selected axes. If the unit + of measure is not supported by the pint library, the scale and units + are not changed. + + Parameters + ---------- + axes : {int | string | iterable of `DataAxis` | None} + Default = None + Convert to a convenient scale and units on the specified axis. + If int, the axis can be specified using the index of the + axis in `axes_manager`. + If string, argument can be `navigation` or `signal` to select the + navigation or signal axes. The axis name can also be provided. + If `None`, convert all axes. + units : {list of string of the same length than axes | str | None} + Default = None + If list, the selected axes will be converted to the provided units. + If str, the navigation or signal axes will be converted to the + provided units. 
+ If `None`, the scale and the units are converted to the appropriate + scale and units to avoid displaying scalebar with >3 digits or too + small number. This can be tweaked by the `factor` argument. + same_units : bool + If `True`, force to keep the same units if the units of + the axes differs. It only applies for the same kind of axis, + `navigation` or `signal`. By default the converted units of the + first axis is used for all axes. If `False`, convert all axes + individually. + %s + """ + convert_navigation = convert_signal = True + + if axes is None: + axes = self.navigation_axes + self.signal_axes + convert_navigation = (len(self.navigation_axes) > 0) + elif axes == 'navigation': + axes = self.navigation_axes + convert_signal = False + convert_navigation = (len(self.navigation_axes) > 0) + elif axes == 'signal': + axes = self.signal_axes + convert_navigation = False + elif isinstance(axes, (DataAxis, int, str)): + if not isinstance(axes, DataAxis): + axes = self[axes] + axes = (axes, ) + convert_navigation = axes[0].navigate + convert_signal = not convert_navigation + else: + raise TypeError( + 'Axes type `{}` is not correct.'.format(type(axes))) + + if isinstance(units, str) or units is None: + units = [units] * len(axes) + elif isinstance(units, list): + if len(units) != len(axes): + raise ValueError('Length of the provided units list {} should ' + 'be the same than the length of the provided ' + 'axes {}.'.format(units, axes)) + else: + raise TypeError('Units type `{}` is not correct. It can be a ' + '`string`, a `list` of string or `None`.' + ''.format(type(units))) + + if same_units: + if convert_navigation: + units_nav = units[:self.navigation_dimension] + self._convert_axes_to_same_units(self.navigation_axes, + units_nav, factor) + if convert_signal: + offset = self.navigation_dimension if convert_navigation else 0 + units_sig = units[offset:] + self._convert_axes_to_same_units(self.signal_axes, + units_sig, factor) + else: + for axis, unit in zip(axes, units): + axis.convert_to_units(unit, factor=factor) + + convert_units.__doc__ %= FACTOR_DOCSTRING + + def _convert_axes_to_same_units(self, axes, units, factor=0.25): + # Check if the units are supported + for axis in axes: + if axis._ignore_conversion(axis.units): + return + + # Set the same units for all axes, use the unit of the first axis + # as reference + axes[0].convert_to_units(units[0], factor=factor) + unit = axes[0].units # after conversion, in case units[0] was None. + for axis in axes[1:]: + # Convert only the units have the same dimensionality + if _ureg(axis.units).dimensionality == _ureg(unit).dimensionality: + axis.convert_to_units(unit, factor=factor) + def update_axes_attributes_from(self, axes, attributes=["scale", "offset", "units"]): """Update the axes attributes to match those given. 
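A short sketch of the manager-level conversion described above; with `units=None` the prefix is chosen automatically via the `factor` heuristic, while a string forces specific units:

```python
>>> import numpy as np
>>> import hyperspy.api as hs
>>> im = hs.signals.Signal2D(np.zeros((512, 512)))
>>> for axis in im.axes_manager.signal_axes:
...     axis.units = 'm'
...     axis.scale = 1.2e-9
>>> im.axes_manager.convert_units('signal')              # compact prefix, e.g. nm here
>>> im.axes_manager.convert_units('signal', units='um')  # force micrometres
```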
@@ -925,27 +1198,62 @@ def set_signal_dimension(self, value): axis.navigate = tl.pop(0) def key_navigator(self, event): - if len(self.navigation_axes) not in (1, 2): + 'Set hotkeys for controlling the indices of the navigator plot' + + if self.navigation_dimension == 0: + # No hotkeys exist that do anything in this case return - x = self.navigation_axes[0] - try: - if event.key == "right" or event.key == "6": - x.index += self._step - elif event.key == "left" or event.key == "4": - x.index -= self._step - elif event.key == "pageup": - self._step += 1 - elif event.key == "pagedown": - if self._step > 1: - self._step -= 1 - if len(self.navigation_axes) == 2: - y = self.navigation_axes[1] - if event.key == "up" or event.key == "8": - y.index -= self._step - elif event.key == "down" or event.key == "2": - y.index += self._step - except TraitError: - pass + + # keyDict values are (axis_index, direction) + # Using arrow keys without Ctrl will be deprecated in 2.0 + mod01 = preferences.Plot.modifier_dims_01 + mod23 = preferences.Plot.modifier_dims_23 + mod45 = preferences.Plot.modifier_dims_45 + + dim0_decrease = mod01 + '+' + preferences.Plot.dims_024_decrease + dim0_increase = mod01 + '+' + preferences.Plot.dims_024_increase + dim1_decrease = mod01 + '+' + preferences.Plot.dims_135_decrease + dim1_increase = mod01 + '+' + preferences.Plot.dims_135_increase + dim2_decrease = mod23 + '+' + preferences.Plot.dims_024_decrease + dim2_increase = mod23 + '+' + preferences.Plot.dims_024_increase + dim3_decrease = mod23 + '+' + preferences.Plot.dims_135_decrease + dim3_increase = mod23 + '+' + preferences.Plot.dims_135_increase + dim4_decrease = mod45 + '+' + preferences.Plot.dims_024_decrease + dim4_increase = mod45 + '+' + preferences.Plot.dims_024_increase + dim5_decrease = mod45 + '+' + preferences.Plot.dims_135_decrease + dim5_increase = mod45 + '+' + preferences.Plot.dims_135_increase + + keyDict = { + # axes 0, 1 + **dict.fromkeys(['left', dim0_decrease, '4'], (0, -1)), + **dict.fromkeys(['right', dim0_increase, '6'], (0, +1)), + **dict.fromkeys(['up', dim1_decrease, '8'], (1, -1)), + **dict.fromkeys(['down', dim1_increase, '2'], (1, +1)), + # axes 2, 3 + **dict.fromkeys([dim2_decrease], (2, -1)), + **dict.fromkeys([dim2_increase], (2, +1)), + **dict.fromkeys([dim3_decrease], (3, -1)), + **dict.fromkeys([dim3_increase], (3, +1)), + # axes 4, 5 + **dict.fromkeys([dim4_decrease], (4, -1)), + **dict.fromkeys([dim4_increase], (4, +1)), + **dict.fromkeys([dim5_decrease], (5, -1)), + **dict.fromkeys([dim5_increase], (5, +1)), + } + + if event.key == 'pageup': + self._step += 1 + elif event.key == 'pagedown': + if self._step > 1: + self._step -= 1 + else: + try: + # may raise keyerror + axes_index, direction = keyDict[event.key] + axes = self.navigation_axes[axes_index] # may raise indexerror + axes.index += direction * self._step # may raise traiterror + except (KeyError, IndexError, TraitError): + pass def copy(self): return copy.copy(self) diff --git a/hyperspy/component.py b/hyperspy/component.py index 54fd16ad1f..1fb48e5fbf 100644 --- a/hyperspy/component.py +++ b/hyperspy/component.py @@ -1135,6 +1135,10 @@ def as_dictionary(self, fullcopy=True): 'parameters': [ p.as_dictionary(fullcopy) for p in self.parameters]} export_to_dictionary(self, self._whitelist, dic, fullcopy) + from hyperspy.model import components + if self._id_name not in components.__dict__.keys(): + import dill + dic['_class_dump'] = dill.dumps(self.__class__) return dic def _load_dictionary(self, dic): diff --git 
a/hyperspy/datasets/__init__.py b/hyperspy/datasets/__init__.py index 5916f5bd14..aa66b40688 100644 --- a/hyperspy/datasets/__init__.py +++ b/hyperspy/datasets/__init__.py @@ -18,3 +18,4 @@ """ from hyperspy.misc.eels.eelsdb import eelsdb +from hyperspy.datasets import artificial_data diff --git a/hyperspy/datasets/artificial_data.py b/hyperspy/datasets/artificial_data.py new file mode 100644 index 0000000000..abd0d3c62d --- /dev/null +++ b/hyperspy/datasets/artificial_data.py @@ -0,0 +1,279 @@ +import numpy as np +from hyperspy import components1d, components2d +from hyperspy.signals import EELSSpectrum, Signal2D + + +def get_low_loss_eels_signal(): + """Get an artificial low loss electron energy loss spectrum. + + The zero loss peak is offset by 4.1 eV. + + Returns + ------- + artificial_low_loss_signal : HyperSpy EELSSpectrum + + Example + ------- + >>> s = hs.datasets.artificial_data.get_low_loss_eels_signal() + >>> s.plot() + + See also + -------- + get_core_loss_eels_signal : get a core loss signal + get_core_loss_eels_model : get a core loss model + get_low_loss_eels_line_scan_signal : get EELS low loss line scan + get_core_loss_eels_line_scan_signal : get EELS core loss line scan + + """ + x = np.arange(-100, 400, 0.5) + zero_loss = components1d.Gaussian(A=100, centre=4.1, sigma=1) + plasmon = components1d.Gaussian(A=100, centre=60, sigma=20) + + data = zero_loss.function(x) + data += plasmon.function(x) + data += np.random.random(size=len(x)) * 0.7 + + s = EELSSpectrum(data) + s.axes_manager[0].offset = x[0] + s.axes_manager[0].scale = x[1] - x[0] + s.metadata.General.title = 'Artifical low loss EEL spectrum' + s.axes_manager[0].name = 'Electron energy loss' + s.axes_manager[0].units = 'eV' + s.set_microscope_parameters( + beam_energy=200, convergence_angle=26, collection_angle=20) + return s + + +def get_core_loss_eels_signal(add_powerlaw=False): + """Get an artificial core loss electron energy loss spectrum. + + Similar to a Mn-L32 edge from a perovskite oxide. + + Some random noise is also added to the spectrum, to simulate + experimental noise. + + Parameters + ---------- + add_powerlaw : bool + If True, adds a powerlaw background to the spectrum. + Default False. 
+ + Returns + ------- + artificial_core_loss_signal : HyperSpy EELSSpectrum + + Example + ------- + >>> import hs.datasets.artifical_data as ad + >>> s = ad.get_core_loss_eels_signal() + >>> s.plot() + + With the powerlaw background + + >>> s = ad.get_core_loss_eels_signal(add_powerlaw=True) + >>> s.plot() + + To make the noise the same for multiple spectra, which can + be useful for testing fitting routines + + >>> np.random.seed(seed=10) + >>> s1 = ad.get_core_loss_eels_signal() + >>> np.random.seed(seed=10) + >>> s2 = ad.get_core_loss_eels_signal() + >>> (s1.data == s2.data).all() + True + + See also + -------- + get_low_loss_eels_model : get a low loss signal + get_core_loss_eels_model : get a model instead of a signal + get_low_loss_eels_line_scan_signal : get EELS low loss line scan + get_core_loss_eels_line_scan_signal : get EELS core loss line scan + + """ + x = np.arange(400, 800, 1) + arctan = components1d.Arctan(A=1, k=0.2, x0=688) + arctan.minimum_at_zero = True + mn_l3_g = components1d.Gaussian(A=100, centre=695, sigma=4) + mn_l2_g = components1d.Gaussian(A=20, centre=720, sigma=4) + + data = arctan.function(x) + data += mn_l3_g.function(x) + data += mn_l2_g.function(x) + data += np.random.random(size=len(x)) * 0.7 + + if add_powerlaw: + powerlaw = components1d.PowerLaw(A=10e8, r=3, origin=0) + data += powerlaw.function(x) + + s = EELSSpectrum(data) + s.axes_manager[0].offset = x[0] + s.metadata.General.title = 'Artifical core loss EEL spectrum' + s.axes_manager[0].name = 'Electron energy loss' + s.axes_manager[0].units = 'eV' + s.set_microscope_parameters( + beam_energy=200, convergence_angle=26, collection_angle=20) + return s + + +def get_low_loss_eels_line_scan_signal(): + """Get an artificial low loss electron energy loss line scan spectrum. + + The zero loss peak is offset by 4.1 eV. + + Returns + ------- + artificial_low_loss_line_scan_signal : HyperSpy EELSSpectrum + + Example + ------- + >>> s = hs.datasets.artificial_data.get_low_loss_eels_signal() + >>> s.plot() + + See also + -------- + get_core_loss_eels_signal : get a core loss signal + get_core_loss_eels_model : get a core loss model + get_core_loss_eels_line_scan_signal : core loss signal with the same size + + """ + x = np.arange(-100, 400, 0.5) + zero_loss = components1d.Gaussian(A=100, centre=4.1, sigma=1) + plasmon = components1d.Gaussian(A=100, centre=60, sigma=20) + + data_signal = zero_loss.function(x) + data_signal += plasmon.function(x) + data = np.zeros((12, len(x))) + for i in range(12): + data[i] += data_signal + data[i] += np.random.random(size=len(x)) * 0.7 + + s = EELSSpectrum(data) + s.axes_manager.signal_axes[0].offset = x[0] + s.axes_manager.signal_axes[0].scale = x[1] - x[0] + s.metadata.General.title = 'Artifical low loss EEL spectrum' + s.axes_manager.signal_axes[0].name = 'Electron energy loss' + s.axes_manager.signal_axes[0].units = 'eV' + s.axes_manager.navigation_axes[0].name = 'Probe position' + s.axes_manager.navigation_axes[0].units = 'nm' + s.set_microscope_parameters( + beam_energy=200, convergence_angle=26, collection_angle=20) + return s + + +def get_core_loss_eels_line_scan_signal(): + """Get an artificial core loss electron energy loss line scan spectrum. + + Similar to a Mn-L32 and Fe-L32 edge from a perovskite oxide. 
+ + Returns + ------- + artificial_core_loss_line_scan_signal : HyperSpy EELSSpectrum + + Example + ------- + >>> s = hs.datasets.artificial_data.get_core_loss_eels_line_scan_signal() + >>> s.plot() + + See also + -------- + get_low_loss_eels_model : get a low loss signal + get_core_loss_eels_model : get a model instead of a signal + get_low_loss_eels_line_scan_signal : get low loss signal with the same size + + """ + x = np.arange(400, 800, 1) + arctan_mn = components1d.Arctan(A=1, k=0.2, x0=688) + arctan_mn.minimum_at_zero = True + arctan_fe = components1d.Arctan(A=1, k=0.2, x0=612) + arctan_fe.minimum_at_zero = True + mn_l3_g = components1d.Gaussian(A=100, centre=695, sigma=4) + mn_l2_g = components1d.Gaussian(A=20, centre=720, sigma=4) + fe_l3_g = components1d.Gaussian(A=100, centre=605, sigma=4) + fe_l2_g = components1d.Gaussian(A=10, centre=630, sigma=3) + + mn_intensity = [1, 1, 1, 1, 1, 1, 0.8, 0.5, 0.2, 0, 0, 0] + fe_intensity = [0, 0, 0, 0, 0, 0, 0.2, 0.5, 0.8, 1, 1, 1] + data = np.zeros((len(mn_intensity), len(x))) + for i in range(len(mn_intensity)): + data[i] += arctan_mn.function(x) * mn_intensity[i] + data[i] += mn_l3_g.function(x) * mn_intensity[i] + data[i] += mn_l2_g.function(x) * mn_intensity[i] + data[i] += arctan_fe.function(x) * fe_intensity[i] + data[i] += fe_l3_g.function(x) * fe_intensity[i] + data[i] += fe_l2_g.function(x) * fe_intensity[i] + data[i] += np.random.random(size=len(x)) * 0.7 + + s = EELSSpectrum(data) + s.axes_manager.signal_axes[0].offset = x[0] + s.metadata.General.title = 'Artifical core loss EEL spectrum' + s.axes_manager.signal_axes[0].name = 'Electron energy loss' + s.axes_manager.signal_axes[0].units = 'eV' + s.axes_manager.navigation_axes[0].name = 'Probe position' + s.axes_manager.navigation_axes[0].units = 'nm' + s.set_microscope_parameters( + beam_energy=200, convergence_angle=26, collection_angle=20) + return s + + +def get_core_loss_eels_model(add_powerlaw=False): + """Get an artificial core loss electron energy loss model. + + Similar to a Mn-L32 edge from a perovskite oxide. + + Parameters + ---------- + add_powerlaw : bool + If True, adds a powerlaw background to the spectrum. + Default False. + + Returns + ------- + artificial_core_loss_model : HyperSpy EELSModel + + Example + ------- + >>> import hs.datasets.artifical_data as ad + >>> s = ad.get_core_loss_eels_model() + >>> s.plot() + + With the powerlaw background + + >>> s = ad.get_core_loss_eels_model(add_powerlaw=True) + >>> s.plot() + + See also + -------- + get_low_loss_eels_model : get a low loss signal + get_core_loss_eels_signal : get a model instead of a signal + + """ + s = get_core_loss_eels_signal(add_powerlaw=add_powerlaw) + m = s.create_model(auto_background=False, GOS='hydrogenic') + return m + + +def get_atomic_resolution_tem_signal2d(): + """Get an artificial atomic resolution TEM Signal2D. 
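+
+    The image is built from 2D Gaussians placed on a regular square grid
+    over a 200 x 200 pixel frame, emulating atomic columns.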
+ + Returns + ------- + artificial_tem_image : HyperSpy Signal2D + + Example + ------- + >>> s = hs.datasets.artificial_data.get_atomic_resolution_tem_signal2d() + >>> s.plot() + + """ + sX, sY = 2, 2 + x_array, y_array = np.mgrid[0:200, 0:200] + image = np.zeros_like(x_array, dtype=np.float32) + gaussian2d = components2d.Gaussian2D(sigma_x=sX, sigma_y=sY) + for x in range(10, 195, 20): + for y in range(10, 195, 20): + gaussian2d.centre_x.value = x + gaussian2d.centre_y.value = y + image += gaussian2d.function(x_array, y_array) + s = Signal2D(image) + return s diff --git a/hyperspy/decorators.py b/hyperspy/decorators.py index d9341ba775..2d399fa60e 100644 --- a/hyperspy/decorators.py +++ b/hyperspy/decorators.py @@ -19,11 +19,6 @@ # custom exceptions from functools import wraps -from hyperspy.exceptions import NoInteractiveError -from hyperspy.defaults_parser import preferences -from hyperspy.signal_tools import Signal1DRangeSelector -from hyperspy.ui_registry import get_gui - def lazify(func, **kwargs): from hyperspy.signal import BaseSignal @@ -99,6 +94,9 @@ def new_decorator(f): @simple_decorator def interactive_range_selector(cm): + from hyperspy.ui_registry import get_gui + from hyperspy.signal_tools import Signal1DRangeSelector + def wrapper(self, *args, **kwargs): if not args and not kwargs: range_selector = Signal1DRangeSelector(self) @@ -107,3 +105,17 @@ def wrapper(self, *args, **kwargs): else: cm(self, *args, **kwargs) return wrapper + + +def jit_ifnumba(*args, **kwargs): + try: + import numba + if "nopython" not in kwargs: + kwargs["nopython"] = True + return numba.jit(*args, **kwargs) + except ImportError: + def wrap1(func): + def wrap2(*args2, **kwargs2): + return func(*args2, **kwargs2) + return wrap2 + return wrap1 diff --git a/hyperspy/defaults_parser.py b/hyperspy/defaults_parser.py index c73d217238..1253ac8013 100644 --- a/hyperspy/defaults_parser.py +++ b/hyperspy/defaults_parser.py @@ -137,6 +137,27 @@ class GUIs(t.HasTraits): desc="Display warnings, if hyperspy_gui_ipywidgets or hyperspy_gui_traitsui are missing.") +class PlotConfig(t.HasTraits): + dims_024_increase = t.Str('right', + label='Navigate right' + ) + dims_024_decrease = t.Str('left', + label='Navigate left', + ) + dims_135_increase = t.Str('down', + label='Navigate down', + ) + dims_135_decrease = t.Str('up', + label='Navigate up', + ) + modifier_dims_01 = t.Enum(['ctrl', 'alt', 'shift', 'ctrl+alt', 'ctrl+shift', 'alt+shift', + 'ctrl+alt+shift'], label='Modifier key for 1st and 2nd dimensions') # 0 elem is default + modifier_dims_23 = t.Enum(['shift', 'alt', 'ctrl', 'ctrl+alt', 'ctrl+shift', 'alt+shift', + 'ctrl+alt+shift'], label='Modifier key for 3rd and 4th dimensions') # 0 elem is default + modifier_dims_45 = t.Enum(['alt', 'ctrl', 'shift', 'ctrl+alt', 'ctrl+shift', 'alt+shift', + 'ctrl+alt+shift'], label='Modifier key for 5th and 6th dimensions') # 0 elem is default + + class EDSConfig(t.HasTraits): eds_mn_ka = t.CFloat(130., label='Energy resolution at Mn Ka (eV)', @@ -163,6 +184,7 @@ class EDSConfig(t.HasTraits): 'GUIs': GUIs(), 'EELS': EELSConfig(), 'EDS': EDSConfig(), + 'Plot': PlotConfig(), } # Set the enums defaults @@ -234,6 +256,7 @@ class Preferences(t.HasTraits): EDS = t.Instance(EDSConfig) General = t.Instance(GeneralConfig) GUIs = t.Instance(GUIs) + Plot = t.Instance(PlotConfig) def save(self): config = configparser.ConfigParser(allow_no_value=True) @@ -246,6 +269,7 @@ def save(self): EDS=template['EDS'], General=template['General'], GUIs=template['GUIs'], + Plot=template['Plot'], ) if 
preferences.General.logger_on: diff --git a/hyperspy/drawing/_widgets/circle.py b/hyperspy/drawing/_widgets/circle.py index e1e255dada..ead4031403 100644 --- a/hyperspy/drawing/_widgets/circle.py +++ b/hyperspy/drawing/_widgets/circle.py @@ -112,6 +112,7 @@ def _set_patch(self): fill=False, lw=self.border_thickness, ec=self.color, + alpha=self.alpha, picker=True,)] if ri > 0: self.patch.append( @@ -121,6 +122,7 @@ def _set_patch(self): fill=False, lw=self.border_thickness, ec=self.color, + alpha=self.alpha, picker=True,)) def _validate_pos(self, value): @@ -154,7 +156,7 @@ def _update_patch_size(self): if ri > 0: # Add the inner circle if len(self.patch) == 1: - # Need to remove the previous patch before using + # Need to remove the previous patch before using # `_add_patch_to` self.ax.artists.remove(self.patch[0]) self.patch = [] diff --git a/hyperspy/drawing/_widgets/horizontal_line.py b/hyperspy/drawing/_widgets/horizontal_line.py index bc8b136513..f317cafaf1 100644 --- a/hyperspy/drawing/_widgets/horizontal_line.py +++ b/hyperspy/drawing/_widgets/horizontal_line.py @@ -34,6 +34,7 @@ def _set_patch(self): self.patch = [ax.axhline( self._pos[0], color=self.color, + alpha=self.alpha, picker=5)] def _onmousemove(self, event): diff --git a/hyperspy/drawing/_widgets/label.py b/hyperspy/drawing/_widgets/label.py index eea936901e..277ad70165 100644 --- a/hyperspy/drawing/_widgets/label.py +++ b/hyperspy/drawing/_widgets/label.py @@ -24,18 +24,17 @@ class LabelWidget(Widget1DBase): - """A draggable text widget. Adds the attributes 'string', 'text_color' and - 'bbox'. These are all arguments for matplotlib's Text artist. The default + """A draggable text widget. Adds the attributes 'string' and 'bbox'. + These are all arguments for matplotlib's Text artist. The default y-coordinate of the label is set to 0.9. 
""" - def __init__(self, axes_manager): - super(LabelWidget, self).__init__(axes_manager) + def __init__(self, axes_manager, color='black', **kwargs): + super(LabelWidget, self).__init__(axes_manager, color=color, **kwargs) self._string = '' self._snap_position = False if not self.axes: self._pos = np.array((0, 0.9)) - self.text_color = 'black' self.bbox = None def _get_string(self): @@ -98,7 +97,8 @@ def _set_patch(self): self._pos[0], self._pos[1], self.string, - color=self.text_color, + color=self.color, + alpha=self.alpha, picker=5, transform=trans, horizontalalignment='left', diff --git a/hyperspy/drawing/_widgets/line2d.py b/hyperspy/drawing/_widgets/line2d.py index ca2ce93c3a..c78785eb61 100644 --- a/hyperspy/drawing/_widgets/line2d.py +++ b/hyperspy/drawing/_widgets/line2d.py @@ -93,8 +93,8 @@ class Line2DWidget(ResizableDraggableWidgetBase): FUNC_A = 32 # Resize/rotate by first vertex FUNC_B = 64 # Resize/rotate by second vertex - def __init__(self, axes_manager): - super(Line2DWidget, self).__init__(axes_manager) + def __init__(self, axes_manager, **kwargs): + super(Line2DWidget, self).__init__(axes_manager, **kwargs) self.linewidth = 1 self.radius_move = self.radius_resize = 5 self.radius_rotate = 15 @@ -255,6 +255,7 @@ def _set_patch(self): animated=self.blit, lw=self.linewidth, c=self.color, + alpha=self.alpha, marker='s', markersize=self.radius_resize, mew=0.1, diff --git a/hyperspy/drawing/_widgets/range.py b/hyperspy/drawing/_widgets/range.py index 5b01dcde53..b16c16899a 100644 --- a/hyperspy/drawing/_widgets/range.py +++ b/hyperspy/drawing/_widgets/range.py @@ -18,7 +18,8 @@ import numpy as np -import matplotlib +from matplotlib.widgets import SpanSelector +import inspect import logging from hyperspy.drawing.widgets import ResizableDraggableWidgetBase @@ -51,8 +52,13 @@ class RangeWidget(ResizableDraggableWidgetBase): will always stay within bounds. 
""" - def __init__(self, axes_manager): - super(RangeWidget, self).__init__(axes_manager) + def __init__(self, axes_manager, ax=None, alpha=0.5, **kwargs): + # Parse all kwargs for the matplotlib SpanSelector + self._SpanSelector_kwargs = {} + for key in inspect.signature(SpanSelector).parameters.keys(): + if key in kwargs: + self._SpanSelector_kwargs[key] = kwargs.pop(key) + super(RangeWidget, self).__init__(axes_manager, alpha=alpha, **kwargs) self.span = None def set_on(self, value): @@ -64,14 +70,14 @@ def set_on(self, value): self.disconnect() try: self.ax.figure.canvas.draw_idle() - except: # figure does not exist + except BaseException: # figure does not exist pass if value is False: self.ax = None - self._WidgetBase__is_on = value + self.__is_on = value def _add_patch_to(self, ax): - self.span = ModifiableSpanSelector(ax) + self.span = ModifiableSpanSelector(ax, **self._SpanSelector_kwargs) self.span.set_initial(self._get_range()) self.span.bounds_check = True self.span.snap_position = self.snap_position @@ -81,6 +87,8 @@ def _add_patch_to(self, ax): self.span.step_ax = self.axes[0] self.span.tolerance = 5 self.patch = [self.span.rect] + self.patch[0].set_color(self.color) + self.patch[0].set_alpha(self.alpha) def _span_changed(self, widget): r = self._get_range() @@ -260,12 +268,14 @@ def _validate_geometry(self, x1=None): self._do_snap_size() -class ModifiableSpanSelector(matplotlib.widgets.SpanSelector): +class ModifiableSpanSelector(SpanSelector): def __init__(self, ax, **kwargs): - onsel = kwargs.pop('onselect', self.dummy) - matplotlib.widgets.SpanSelector.__init__( - self, ax, onsel, direction='horizontal', useblit=False, **kwargs) + onselect = kwargs.pop('onselect', self.dummy) + direction = kwargs.pop('direction', 'horizontal') + useblit = kwargs.pop('useblit', ax.figure.canvas.supports_blit) + SpanSelector.__init__(self, ax, onselect, direction=direction, + useblit=useblit, span_stays=False, **kwargs) # The tolerance in points to pick the rectangle sizes self.tolerance = 1 self.on_move_cid = None @@ -320,17 +330,47 @@ def _set_range(self, value): moved = self._range[0] != value[0] self._range = value if moved: - self.rect.set_x(value[0]) + self._set_span_x(value[0]) self.events.moved.trigger(self) if resized: - self.rect.set_width(value[1] - value[0]) + self._set_span_width(value[1] - value[0]) self.events.resized.trigger(self) if moved or resized: - self.update() + self.draw_patch() self.events.changed.trigger(self) range = property(_get_range, _set_range) + def _set_span_x(self, value): + if self.direction == 'horizontal': + self.rect.set_x(value) + else: + self.rect.set_y(value) + + def _set_span_width(self, value): + if self.direction == 'horizontal': + self.rect.set_width(value) + else: + self.rect.set_height(value) + + def _get_span_x(self): + if self.direction == 'horizontal': + return self.rect.get_x() + else: + return self.rect.get_y() + + def _get_span_width(self): + if self.direction == 'horizontal': + return self.rect.get_width() + else: + return self.rect.get_height() + + def _get_mouse_position(self, event): + if self.direction == 'horizontal': + return event.xdata + else: + return event.ydata + def set_initial(self, initial_range=None): """ Remove selection events, set the spanner, and go to modify mode. 
@@ -338,31 +378,41 @@ def set_initial(self, initial_range=None): if initial_range is not None: self.range = initial_range - for cid in self.cids: - self.canvas.mpl_disconnect(cid) + self.disconnect_events() # And connect to the new ones - self.cids.append( - self.canvas.mpl_connect('button_press_event', self.mm_on_press)) - self.cids.append( - self.canvas.mpl_connect('button_release_event', - self.mm_on_release)) - self.cids.append( - self.canvas.mpl_connect('draw_event', self.update_background)) + self.connect_event('button_press_event', self.mm_on_press) + self.connect_event('button_release_event', self.mm_on_release) + self.connect_event('draw_event', self.update_background) + self.rect.set_visible(True) self.rect.contains = self.contains - self.update() + + def update(self, *args): + # Override the SpanSelector `update` method to blit properly all + # artirts before we go to "modify mode" in `set_initial`. + self.draw_patch() + + def draw_patch(self, *args): + """Update the patch drawing. + """ + try: + if self.useblit and hasattr(self.ax, 'hspy_fig'): + self.ax.hspy_fig._update_animated() + elif self.ax.figure is not None: + self.ax.figure.canvas.draw_idle() + except AttributeError: + pass # When figure is None, typically when closing def contains(self, mouseevent): x, y = self.rect.get_transform().inverted().transform_point( (mouseevent.x, mouseevent.y)) + v = x if self.direction == 'vertical' else y # Assert y is correct first - if not (0.0 <= y <= 1.0): + if not (0.0 <= v <= 1.0): return False, {} - invtrans = self.ax.transData.inverted() - x_pt = self.tolerance * abs((invtrans.transform((1, 0)) - - invtrans.transform((0, 0)))[0]) + x_pt = self._get_point_size_in_data_units() hit = self._range[0] - x_pt, self._range[1] + x_pt - if hit[0] < mouseevent.xdata < hit[1]: + if hit[0] < self._get_mouse_position < hit[1]: return True, {} return False, {} @@ -373,18 +423,22 @@ def release(self, event): return self.buttonDown = False self.update_range() - self.onselect() self.set_initial() + def _get_point_size_in_data_units(self): + # Calculate the point size in data units + invtrans = self.ax.transData.inverted() + (x, y) = (1, 0) if self.direction == 'horizontal' else (0, 1) + x_pt = self.tolerance * abs((invtrans.transform((x, y)) - + invtrans.transform((0, 0)))[y]) + return x_pt + def mm_on_press(self, event): if self.ignore(event) and not self.buttonDown: return self.buttonDown = True - # Calculate the point size in data units - invtrans = self.ax.transData.inverted() - x_pt = self.tolerance * abs((invtrans.transform((1, 0)) - - invtrans.transform((0, 0)))[0]) + x_pt = self._get_point_size_in_data_units() # Determine the size of the regions for moving and stretching self.update_range() @@ -392,16 +446,16 @@ def mm_on_press(self, event): right_region = self._range[1] - x_pt, self._range[1] + x_pt middle_region = self._range[0] + x_pt, self._range[1] - x_pt - if in_interval(event.xdata, left_region) is True: + if in_interval(self._get_mouse_position(event), left_region) is True: self.on_move_cid = \ self.canvas.mpl_connect('motion_notify_event', self.move_left) - elif in_interval(event.xdata, right_region): + elif in_interval(self._get_mouse_position(event), right_region): self.on_move_cid = \ self.canvas.mpl_connect('motion_notify_event', self.move_right) - elif in_interval(event.xdata, middle_region): - self.pressv = event.xdata + elif in_interval(self._get_mouse_position(event), middle_region): + self.pressv = self._get_mouse_position(event) self.on_move_cid = \ 
self.canvas.mpl_connect('motion_notify_event', self.move_rect) @@ -409,8 +463,8 @@ def mm_on_press(self, event): return def update_range(self): - self._range = (self.rect.get_x(), - self.rect.get_x() + self.rect.get_width()) + self._range = (self._get_span_x(), + self._get_span_x() + self._get_span_width()) def switch_left_right(self, x, left_to_right): if left_to_right: @@ -419,7 +473,7 @@ def switch_left_right(self, x, left_to_right): return w = self._range[1] - self._range[0] r0 = self._range[1] - self.rect.set_x(r0) + self._set_span_x(r0) r1 = r0 + w self.canvas.mpl_disconnect(self.on_move_cid) self.on_move_cid = \ @@ -441,7 +495,7 @@ def switch_left_right(self, x, left_to_right): def move_left(self, event): if self.buttonDown is False or self.ignore(event): return - x = event.xdata + x = self._get_mouse_position(event) if self.step_ax is not None: if (self.bounds_check and x < self.step_ax.low_value - self.step_ax.scale): @@ -464,22 +518,22 @@ def move_left(self, event): self.move_right(event) return width_increment = self._range[0] - x - if self.rect.get_width() + width_increment <= 0: + if self._get_span_width() + width_increment <= 0: return - self.rect.set_x(x) - self.rect.set_width(self.rect.get_width() + width_increment) + self._set_span_x(x) + self._set_span_width(self._get_span_width() + width_increment) self.update_range() self.events.moved.trigger(self) self.events.resized.trigger(self) self.events.changed.trigger(self) if self.onmove_callback is not None: self.onmove_callback(*self._range) - self.update() + self.draw_patch() def move_right(self, event): if self.buttonDown is False or self.ignore(event): return - x = event.xdata + x = self._get_mouse_position(event) if self.step_ax is not None: if (self.bounds_check and x > self.step_ax.high_value + self.step_ax.scale): @@ -499,20 +553,20 @@ def move_right(self, event): self.move_left(event) return width_increment = x - self._range[1] - if self.rect.get_width() + width_increment <= 0: + if self._get_span_width() + width_increment <= 0: return - self.rect.set_width(self.rect.get_width() + width_increment) + self._set_span_width(self._get_span_width() + width_increment) self.update_range() self.events.resized.trigger(self) self.events.changed.trigger(self) if self.onmove_callback is not None: self.onmove_callback(*self._range) - self.update() + self.draw_patch() def move_rect(self, event): if self.buttonDown is False or self.ignore(event): return - x_increment = event.xdata - self.pressv + x_increment = self._get_mouse_position(event) - self.pressv if self.step_ax is not None: if self.snap_position: rem = x_increment % self.step_ax.scale @@ -521,14 +575,14 @@ def move_rect(self, event): else: rem = self.step_ax.scale - rem x_increment += rem - self.rect.set_x(self.rect.get_x() + x_increment) + self._set_span_x(self._get_span_x() + x_increment) self.update_range() self.pressv += x_increment self.events.moved.trigger(self) self.events.changed.trigger(self) if self.onmove_callback is not None: self.onmove_callback(*self._range) - self.update() + self.draw_patch() def mm_on_release(self, event): if self.buttonDown is False or self.ignore(event): @@ -538,9 +592,8 @@ def mm_on_release(self, event): self.on_move_cid = None def turn_off(self): - for cid in self.cids: - self.canvas.mpl_disconnect(cid) + self.disconnect_events() if self.on_move_cid is not None: - self.canvas.mpl_disconnect(cid) + self.canvas.mpl_disconnect(self.on_move_cid) self.ax.patches.remove(self.rect) self.ax.figure.canvas.draw_idle() diff --git 
a/hyperspy/drawing/_widgets/rectangles.py b/hyperspy/drawing/_widgets/rectangles.py index 106a720d63..b0218ab776 100644 --- a/hyperspy/drawing/_widgets/rectangles.py +++ b/hyperspy/drawing/_widgets/rectangles.py @@ -37,8 +37,8 @@ class SquareWidget(Widget2DBase): bounds only correspond to pure indices for odd sizes. """ - def __init__(self, axes_manager): - super(SquareWidget, self).__init__(axes_manager) + def __init__(self, axes_manager, **kwargs): + super(SquareWidget, self).__init__(axes_manager, **kwargs) def _set_patch(self): """Sets the patch to a matplotlib Rectangle with the correct geometry. @@ -52,6 +52,7 @@ def _set_patch(self): fill=False, lw=self.border_thickness, ec=self.color, + alpha=self.alpha, picker=True,)] super(SquareWidget, self)._set_patch() diff --git a/hyperspy/drawing/_widgets/vertical_line.py b/hyperspy/drawing/_widgets/vertical_line.py index 295c57b1b2..3c4b9756ac 100644 --- a/hyperspy/drawing/_widgets/vertical_line.py +++ b/hyperspy/drawing/_widgets/vertical_line.py @@ -33,6 +33,7 @@ def _set_patch(self): ax = self.ax self.patch = [ax.axvline(self._pos[0], color=self.color, + alpha=self.alpha, picker=5)] def _onmousemove(self, event): diff --git a/hyperspy/drawing/image.py b/hyperspy/drawing/image.py index b8734dd409..4ef3ae1158 100644 --- a/hyperspy/drawing/image.py +++ b/hyperspy/drawing/image.py @@ -43,7 +43,7 @@ class ImagePlot(BlittedFigure): arguments. pixel_units : {None, string} The pixel units for the scale bar. Normally - scalebar, colorbar, plot_indices : bool + scalebar, plot_ticks, colorbar, plot_indices : bool title : str The title is printed at the top of the image. vmin, vmax : float @@ -83,6 +83,7 @@ def __init__(self): self._text = None self._text_position = (0, 1.05,) self.axes_manager = None + self.axes_off = False self._aspect = 1 self._extent = None self.xaxis = None @@ -123,7 +124,10 @@ def vmin(self, vmin): @property def axes_ticks(self): if self._user_axes_ticks is None: - return self._auto_axes_ticks + if self.scalebar is False: + return True + else: + return self._auto_axes_ticks else: return self._user_axes_ticks @@ -148,14 +152,6 @@ def scalebar(self, value): def configure(self): xaxis = self.xaxis yaxis = self.yaxis - # Signal2D labels - self._xlabel = '%s' % str(xaxis) - if xaxis.units is not Undefined: - self._xlabel += ' (%s)' % xaxis.units - - self._ylabel = '%s' % str(yaxis) - if yaxis.units is not Undefined: - self._ylabel += ' (%s)' % yaxis.units if (xaxis.units == yaxis.units) and (xaxis.scale == yaxis.scale): self._auto_scalebar = True @@ -165,6 +161,15 @@ def configure(self): self._auto_scalebar = False self._auto_axes_ticks = True + # Signal2D labels + self._xlabel = '{}'.format(xaxis) + if xaxis.units is not Undefined: + self._xlabel += ' ({})'.format(xaxis.units) + + self._ylabel = '{}'.format(yaxis) + if yaxis.units is not Undefined: + self._ylabel += ' ({})'.format(yaxis.units) + # Calibrate the axes of the navigator image self._extent = (xaxis.axis[0] - xaxis.scale / 2., xaxis.axis[-1] + xaxis.scale / 2., @@ -237,6 +242,8 @@ def create_axis(self): self.ax.set_xticks([]) self.ax.set_yticks([]) self.ax.hspy_fig = self + if self.axes_off: + self.ax.axis('off') def plot(self, **kwargs): self.configure() diff --git a/hyperspy/drawing/marker.py b/hyperspy/drawing/marker.py index 60f12f9006..b687753bec 100644 --- a/hyperspy/drawing/marker.py +++ b/hyperspy/drawing/marker.py @@ -177,8 +177,8 @@ def plot(self, render_figure=True): render_figure : bool, optional, default True If True, will render the figure after adding the 
marker. If False, the marker will be added to the plot, but will the figure - will not be rendered. This is useful when plotting many markers, - since rendering the figure after adding each marker will slow + will not be rendered. This is useful when plotting many markers, + since rendering the figure after adding each marker will slow things down. """ if self.ax is None: diff --git a/hyperspy/drawing/mpl_hie.py b/hyperspy/drawing/mpl_hie.py index 4a160345c4..619265fd43 100644 --- a/hyperspy/drawing/mpl_hie.py +++ b/hyperspy/drawing/mpl_hie.py @@ -27,6 +27,7 @@ def plot_signal(self, scalebar=True, scalebar_color="white", axes_ticks=None, + axes_off=False, saturated_pixels=0, vmin=None, vmax=None, @@ -49,6 +50,8 @@ def plot_signal(self, If True, plot the axes ticks. If None axes_ticks are only plotted when the scale bar is not plotted. If False the axes ticks are never plotted. + axes_off : bool, optional + If True, the axes labels are not plotted. saturated_pixels: scalar The percentage of pixels that are left out of the bounds. For example, the low and high bounds of a value of 1 are the @@ -73,6 +76,7 @@ def plot_signal(self, imf.quantity_label = self.quantity_label imf.scalebar = scalebar imf.axes_ticks = axes_ticks + imf.axes_off = axes_off imf.vmin, imf.vmax = vmin, vmax imf.saturated_pixels = saturated_pixels imf.no_nans = no_nans diff --git a/hyperspy/drawing/mpl_hse.py b/hyperspy/drawing/mpl_hse.py index abb244285b..373f17a2cc 100644 --- a/hyperspy/drawing/mpl_hse.py +++ b/hyperspy/drawing/mpl_hse.py @@ -23,7 +23,7 @@ from traits.api import Undefined from hyperspy.drawing.mpl_he import MPL_HyperExplorer -from hyperspy.drawing import signal1d, utils +from hyperspy.drawing import signal1d class MPL_HyperSignal1D_Explorer(MPL_HyperExplorer): @@ -78,19 +78,22 @@ def plot_signal(self): self.signal_plot.plot() return # Create the figure - self.xlabel = '%s' % str(self.axes_manager.signal_axes[0]) - if self.axes_manager.signal_axes[0].units is not Undefined: - self.xlabel += ' (%s)' % self.axes_manager.signal_axes[0].units - self.ylabel = self.quantity_label self.axis = self.axes_manager.signal_axes[0] sf = signal1d.Signal1DFigure(title=self.signal_title + " Signal") - sf.xlabel = self.xlabel - sf.ylabel = self.ylabel sf.axis = self.axis if sf.ax is None: sf.create_axis() sf.axes_manager = self.axes_manager + self.xlabel = '{}'.format(self.axes_manager.signal_axes[0]) + if self.axes_manager.signal_axes[0].units is not Undefined: + self.xlabel += ' ({})'.format( + self.axes_manager.signal_axes[0].units) + self.ylabel = self.quantity_label if self.quantity_label is not '' \ + else 'Intensity' + sf.xlabel = self.xlabel + sf.ylabel = self.ylabel + self.signal_plot = sf # Create a line to the left axis with the default indices sl = signal1d.Signal1DLine() diff --git a/hyperspy/drawing/signal1d.py b/hyperspy/drawing/signal1d.py index 2fab748d88..d22d836315 100644 --- a/hyperspy/drawing/signal1d.py +++ b/hyperspy/drawing/signal1d.py @@ -143,7 +143,7 @@ def update(self): marker.update() for line in self.ax_lines + self.right_ax_lines: # save on figure rendering and do it at the end - line.update(render_figure=False) + line._auto_update_line(render_figure=False) if self.ax.figure.canvas.supports_blit: self.ax.hspy_fig._update_animated() else: @@ -327,9 +327,11 @@ def _auto_update_line(self, *args, **kwargs): """ if self.auto_update: - # if markers are plotted, we don't render the figure now but when - # once the markers have been updated - kwargs['render_figure'] = 
(len(self.ax.hspy_fig.ax_markers) == 0) + if 'render_figure' not in kwargs.keys(): + # if markers are plotted, we don't render the figure now but + # once the markers have been updated + kwargs['render_figure'] = ( + len(self.ax.hspy_fig.ax_markers) == 0) self.update(self, *args, **kwargs) def update(self, force_replot=False, render_figure=True): diff --git a/hyperspy/drawing/utils.py b/hyperspy/drawing/utils.py index 4dd2d74269..4c3a154221 100644 --- a/hyperspy/drawing/utils.py +++ b/hyperspy/drawing/utils.py @@ -448,8 +448,15 @@ def plot_images(images, If any signal is not an image, a ValueError will be raised multi-dimensional images will have each plane plotted as a separate image - cmap : matplotlib colormap, optional - The colormap used for the images, by default read from pyplot + cmap : matplotlib colormap, list, or ``'mpl_colors'``, *optional* + The colormap used for the images, by default read from ``pyplot``. + A list of colormaps can also be provided, and the images will + cycle through them. Optionally, the value ``'mpl_colors'`` will + cause the cmap to loop through the default ``matplotlib`` + colors (to match with the default output of the + :py:func:`~.drawing.utils.plot_spectra` method. + Note: if using more than one colormap, using the ``'single'`` + option for ``colorbar`` is disallowed. no_nans : bool, optional If True, set nans to zero for plotting. per_row : int, optional @@ -567,6 +574,12 @@ def plot_images(images, or try adjusting `label`, `labelwrap`, or `per_row` """ + def __check_single_colorbar(cbar): + if cbar is 'single': + raise ValueError('Cannot use a single colorbar with multiple ' + 'colormaps. Please check for compatible ' + 'arguments.') + from hyperspy.drawing.widgets import ScaleBar from hyperspy.misc import rgb_tools from hyperspy.signal import BaseSignal @@ -579,17 +592,6 @@ def plot_images(images, "multi-dimensional signal." " " + repr(type(images)) + " was given.") - # Get default colormap from pyplot: - if cmap is None: - cmap = plt.get_cmap().name - elif isinstance(cmap, mpl.colors.Colormap): - cmap = cmap.name - if centre_colormap == "auto": - if cmap in MPL_DIVERGING_COLORMAPS: - centre_colormap = True - else: - centre_colormap = False - # If input is >= 1D signal (e.g. for multi-dimensional plotting), # copy it and put it in a list so labeling works out as (x,y) when plotting if isinstance(images, @@ -609,6 +611,52 @@ def plot_images(images, if sig.axes_manager.navigation_size > 0 else 1) + # If no cmap given, get default colormap from pyplot: + if cmap is None: + cmap = [plt.get_cmap().name] + elif cmap == 'mpl_colors': + for n_color, c in enumerate(mpl.rcParams['axes.prop_cycle']): + make_cmap(colors=['#000000', c['color']], + name='mpl{}'.format(n_color)) + cmap = ['mpl{}'.format(i) for i in + range(len(mpl.rcParams['axes.prop_cycle']))] + __check_single_colorbar(colorbar) + # cmap is list, tuple, or something else iterable (but not string): + elif hasattr(cmap, '__iter__') and not isinstance(cmap, str): + try: + cmap = [c.name for c in cmap] # convert colormap to string + except AttributeError: + cmap = [c for c in cmap] # c should be string if not colormap + __check_single_colorbar(colorbar) + elif isinstance(cmap, mpl.colors.Colormap): + cmap = [cmap.name] # convert single colormap to list with string + elif isinstance(cmap, str): + cmap = [cmap] # cmap is single string, so make it a list + else: + # Didn't understand cmap input, so raise error + raise ValueError('The provided cmap value was not understood. 
Please ' + 'check input values.') + + # If any of the cmaps given are diverging, and auto-centering, set the + # appropriate flag: + if centre_colormap == "auto": + centre_colormaps = [] + for c in cmap: + if c in MPL_DIVERGING_COLORMAPS: + centre_colormaps.append(True) + else: + centre_colormaps.append(False) + # if it was True, just convert to list + elif centre_colormap: + centre_colormaps = [True] + # likewise for false + elif not centre_colormap: + centre_colormaps = [False] + + # finally, convert lists to cycle generators for adaptive length: + centre_colormaps = itertools.cycle(centre_colormaps) + cmap = itertools.cycle(cmap) + def _check_arg(arg, default_value, arg_name): if isinstance(arg, list): if len(arg) != n: @@ -771,7 +819,7 @@ def _check_arg(arg, default_value, arg_name): 'single colorbar') else: g_vmax = vmax if vmax is not None else g_vmax - if centre_colormap: + if next(centre_colormaps): g_vmin, g_vmax = centre_colormap_values(g_vmin, g_vmax) # Check if we need to add a scalebar for some of the images @@ -783,6 +831,10 @@ def _check_arg(arg, default_value, arg_name): idx = 0 ax_im_list = [0] * len(isrgb) + + # Replot: create a list to store references to the images + replot_ims = [] + # Loop through each image, adding subplot for each one for i, ims in enumerate(images): # Get handles for the signal axes and axes_manager @@ -793,6 +845,7 @@ def _check_arg(arg, default_value, arg_name): ax = f.add_subplot(rows, per_row, idx + 1) axes_list.append(ax) data = im.data + centre = next(centre_colormaps) # get next value for centreing # Enable RGB plotting if rgb_tools.is_rgbx(data): @@ -805,7 +858,7 @@ def _check_arg(arg, default_value, arg_name): data, saturated_pixels[idx]) l_vmin = vmin[idx] if vmin[idx] is not None else l_vmin l_vmax = vmax[idx] if vmax[idx] is not None else l_vmax - if centre_colormap: + if centre: l_vmin, l_vmax = centre_colormap_values(l_vmin, l_vmax) # Remove NaNs (if requested) @@ -829,8 +882,8 @@ def _check_arg(arg, default_value, arg_name): if not isinstance(aspect, (int, float)) and aspect not in [ 'auto', 'square', 'equal']: - print('Did not understand aspect ratio input. ' - 'Using \'auto\' as default.') + _logger.warning("Did not understand aspect ratio input. 
" + "Using 'auto' as default.") aspect = 'auto' if aspect is 'auto': @@ -850,19 +903,23 @@ def _check_arg(arg, default_value, arg_name): if 'interpolation' not in kwargs.keys(): kwargs['interpolation'] = 'nearest' + # Get colormap for this image: + cm = next(cmap) + # Plot image data, using vmin and vmax to set bounds, # or allowing them to be set automatically if using individual # colorbars if colorbar is 'single' and not isrgb[i]: axes_im = ax.imshow(data, - cmap=cmap, extent=extent, + cmap=cm, + extent=extent, vmin=g_vmin, vmax=g_vmax, aspect=asp, *args, **kwargs) ax_im_list[i] = axes_im else: axes_im = ax.imshow(data, - cmap=cmap, + cmap=cm, extent=extent, vmin=l_vmin, vmax=l_vmax, @@ -924,6 +981,9 @@ def _check_arg(arg, default_value, arg_name): units=axes[0].units, color=scalebar_color, ) + # Replot: store references to the images + replot_ims.append(im) + idx += 1 # If using a single colorbar, add it, and do tight_layout, ensuring that @@ -970,6 +1030,36 @@ def _check_arg(arg, default_value, arg_name): if padding is not None: plt.subplots_adjust(**padding) + # Replot: connect function + def on_dblclick(event): + # On the event of a double click, replot the selected subplot + if not event.inaxes: + return + if not event.dblclick: + return + subplots = [axi for axi in f.axes if isinstance(axi, mpl.axes.Subplot)] + inx = list(subplots).index(event.inaxes) + im = replot_ims[inx] + + # Use some of the info in the subplot + cm = subplots[inx].images[0].get_cmap() + clim = subplots[inx].images[0].get_clim() + + sbar = False + if (scalelist and inx in scalebar) or scalebar is 'all': + sbar = True + + im.plot(colorbar=bool(colorbar), + vmin=clim[0], + vmax=clim[1], + no_nans=no_nans, + aspect=asp, + scalebar=sbar, + scalebar_color=scalebar_color, + cmap=cm) + + f.canvas.mpl_connect('button_press_event', on_dblclick) + return axes_list @@ -988,6 +1078,79 @@ def set_axes_decor(ax, axes_decor): ax.set_yticklabels([]) +def make_cmap(colors, name='my_colormap', position=None, + bit=False, register=True): + """ + Create a matplotlib colormap with customized colors, optionally registering + it with matplotlib for simplified use. + + Adapted from Chris Slocum's code at: + https://github.com/CSlocumWX/custom_colormap/blob/master/custom_colormaps.py + and used under the terms of that code's BSD-3 license + + Parameters + ---------- + colors : iterable + list of either tuples containing rgb values, or html strings + Colors should be arranged so that the first color is the lowest + value for the colorbar and the last is the highest. + name : str + name of colormap to use when registering with matplotlib + position : None or iterable + list containing the values (from [0,1]) that dictate the position + of each color within the colormap. If None (default), the colors + will be equally-spaced within the colorbar. 
+ bit : boolean + True if RGB colors are given in 8-bit [0 to 255] or False if given + in arithmetic basis [0 to 1] (default) + register : boolean + switch to control whether or not to register the custom colormap + with matplotlib in order to enable use by just the name string + """ + def _html_color_to_rgb(color_string): + """ convert #RRGGBB to an (R, G, B) tuple """ + color_string = color_string.strip() + if color_string[0] == '#': + color_string = color_string[1:] + if len(color_string) != 6: + raise ValueError( + "input #{} is not in #RRGGBB format".format(color_string)) + r, g, b = color_string[:2], color_string[2:4], color_string[4:] + r, g, b = [int(n, 16) / 255 for n in (r, g, b)] + return r, g, b + + bit_rgb = np.linspace(0, 1, 256) + + if position is None: + position = np.linspace(0, 1, len(colors)) + else: + if len(position) != len(colors): + raise ValueError("position length must be the same as colors") + elif position[0] != 0 or position[-1] != 1: + raise ValueError("position must start with 0 and end with 1") + + cdict = {'red': [], 'green': [], 'blue': []} + + for pos, color in zip(position, colors): + if isinstance(color, str): + color = _html_color_to_rgb(color) + + elif bit: + color = (bit_rgb[color[0]], + bit_rgb[color[1]], + bit_rgb[color[2]]) + + cdict['red'].append((pos, color[0], color[0])) + cdict['green'].append((pos, color[1], color[1])) + cdict['blue'].append((pos, color[2], color[2])) + + cmap = mpl.colors.LinearSegmentedColormap(name, cdict, 256) + + if register: + mpl.cm.register_cmap(name, cmap) + return cmap + + def plot_spectra( spectra, style='overlap', @@ -1206,7 +1369,7 @@ def animate_legend(figure='last'): ax = plt.gca() else: ax = figure.axes[0] - lines = ax.lines + lines = ax.lines[::-1] lined = dict() leg = ax.get_legend() for legline, origline in zip(leg.get_lines(), lines): diff --git a/hyperspy/drawing/widget.py b/hyperspy/drawing/widget.py index 5dc2d6dc3f..d70cc74e21 100644 --- a/hyperspy/drawing/widget.py +++ b/hyperspy/drawing/widget.py @@ -45,7 +45,7 @@ class WidgetBase(object): needed. """ - def __init__(self, axes_manager=None, **kwargs): + def __init__(self, axes_manager=None, color='red', alpha=1.0, **kwargs): self.axes_manager = axes_manager self._axes = list() self.ax = None @@ -53,11 +53,12 @@ def __init__(self, axes_manager=None, **kwargs): self.selected = False self._selected_artist = None self._size = 1. - self._pos = 0. 
- self.color = 'red' + self._pos = np.array([0.]) self.__is_on = True self.background = None self.patch = [] + self.color = color + self.alpha = alpha self.cids = list() self.blit = True self.events = Events() @@ -132,6 +133,26 @@ def set_on(self, value): self.ax = None self.__is_on = value + @property + def color(self): + return self._color + + @color.setter + def color(self, color): + self._color = color + for p in self.patch: + p.set_color(self._color) + + @property + def alpha(self): + return self._alpha + + @alpha.setter + def alpha(self, alpha): + self._alpha = alpha + for p in self.patch: + p.set_alpha(self._alpha) + def _set_patch(self): """Create the matplotlib patch(es), and store it in self.patch """ diff --git a/hyperspy/external/astroML/histtools.py b/hyperspy/external/astroML/histtools.py index c81b699a07..998115ba96 100644 --- a/hyperspy/external/astroML/histtools.py +++ b/hyperspy/external/astroML/histtools.py @@ -381,8 +381,8 @@ def dasky_scotts_bin_width(data, return_bins=True): See Also -------- - knuth_bin_width, - freedman_bin_width, + knuth_bin_width, + freedman_bin_width, astroML.plotting.hist """ if not isinstance(data, da.Array): diff --git a/hyperspy/io.py b/hyperspy/io.py index af2e67818a..f30e08cfbc 100644 --- a/hyperspy/io.py +++ b/hyperspy/io.py @@ -49,13 +49,14 @@ def load(filenames=None, stack_axis=None, new_axis_name="stack_element", lazy=False, + convert_units=False, **kwds): """ - Load potentially multiple supported file into an hyperspy structure + Load potentially multiple supported file into an hyperspy structure. - Supported formats: hspy (HDF5), msa, Gatan dm3, Ripple (rpl+raw), Bruker bcf, - FEI ser and emi, SEMPER unf, EMD, EDAX spd/spc, tif, and a number - of image formats. + Supported formats: hspy (HDF5), msa, Gatan dm3, Ripple (rpl+raw), + Bruker bcf and spx, FEI ser and emi, SEMPER unf, EMD, EDAX spd/spc, + tif, and a number of image formats. Any extra keyword is passed to the corresponding reader. For available options see their individual documentation. @@ -105,13 +106,15 @@ def load(filenames=None, until it finds a name that is not yet in use. lazy : {None, bool} Open the data lazily - i.e. without actually reading the data from the - disk until required. Allows opening arbitrary-sized datasets. default + disk until required. Allows opening arbitrary-sized datasets. The default is `False`. + convert_units : {bool} + If True, convert the units using the `convert_to_units` method of + the `axes_manager`. If False, does nothing. The default is False. print_info: bool For SEMPER unf- and EMD (Berkley)-files, if True (default is False) additional information read during loading is printed for a quick overview. - downsample : int (1–4095) For Bruker bcf files, if set to integer (>=2) (default 1) bcf is parsed into down-sampled size array by given integer factor, @@ -123,10 +126,38 @@ def load(filenames=None, bcf is parsed into array with depth cutoff at coresponding given energy. This allows to conserve the memory, with cutting-off unused spectra's tail, or force enlargement of the spectra size. - select_type: {'spectrum', 'image', None} - For Bruker bcf files, if one of 'spectrum' or 'image' (default is None) - the loader returns either only hypermap or only SEM/TEM electron images. - + select_type: {'spectrum_image', 'image', 'single_spectrum', None} + If `None` (default), all data are loaded. 
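+        This option only applies to file formats that can contain several
+        kinds of data in the same file.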
+ For Bruker bcf and FEI emd files: if one of 'spectrum_image', 'image' or + 'single_spectrum', the loader return single_spectrumns either only the + spectrum image or only the images (including EDS map for FEI emd files) + or only the single spectra (for FEI emd files). + first_frame : int (default 0) + Only for FEI emd files: load only the data acquired after the specified + fname. + last_frame : None or int (default None) + Only for FEI emd files: load only the data acquired up to specified + fname. If None, load up the data to the end. + sum_frames : bool (default is True) + Only for FEI emd files: load each EDS frame individually. + sum_EDS_detectors : bool (default is True) + Only for FEI emd files: load each frame individually. If True, the signal + from the different detector are summed. If False, a distinct signal is + returned for each EDS detectors. + rebin_energy : int, a multiple of the length of the energy dimension (default 1) + Only for FEI emd files: rebin the energy axis by the integer provided + during loading in order to save memory space. + SI_data_dtype : numpy.dtype + Only for FEI emd files: set the dtype of the spectrum image data in + order to save memory space. If None, the default dtype from the FEI emd + file is used. + load_SI_image_stack : bool (default False) + Load the stack of STEM images acquired simultaneously as the EDS + spectrum image. + dataset_name : string or list, optional + For filetypes which support several datasets in the same file, this + will only load the specified dataset. Several datasets can be loaded + by using a list of strings. Only for EMD (NCEM) files. Returns @@ -160,7 +191,7 @@ def load(filenames=None, warnings.warn(warn_str.format(k), VisibleDeprecationWarning) del kwds[k] kwds['signal_type'] = signal_type - + kwds['convert_units'] = convert_units if filenames is None: from hyperspy.signal_tools import Load load_ui = Load() @@ -246,9 +277,7 @@ def load(filenames=None, return objects -def load_single_file(filename, - signal_type=None, - **kwds): +def load_single_file(filename, **kwds): """ Load any supported file into an HyperSpy structure Supported formats: netCDF, msa, Gatan dm3, Ripple (rpl+raw), @@ -272,22 +301,16 @@ def load_single_file(filename, try: from hyperspy.io_plugins import image reader = image - return load_with_reader(filename, reader, - signal_type=signal_type, **kwds) - except: + return load_with_reader(filename, reader, **kwds) + except BaseException: raise IOError('If the file format is supported' ' please report this error') else: reader = io_plugins[i] - return load_with_reader(filename=filename, - reader=reader, - signal_type=signal_type, - **kwds) + return load_with_reader(filename=filename, reader=reader, **kwds) -def load_with_reader(filename, - reader, - signal_type=None, +def load_with_reader(filename, reader, signal_type=None, convert_units=False, **kwds): lazy = kwds.get('lazy', False) file_data_list = reader.file_reader(filename, @@ -306,6 +329,8 @@ def load_with_reader(filename, objects[-1].tmp_parameters.folder = folder objects[-1].tmp_parameters.filename = filename objects[-1].tmp_parameters.extension = extension.replace('.', '') + if convert_units: + objects[-1].axes_manager.convert_units() else: # it's a standalone model continue @@ -477,11 +502,11 @@ def save(filename, signal, overwrite=None, **kwds): ensure_directory(filename) is_file = os.path.isfile(filename) if overwrite is None: - write = overwrite_method(filename) # Ask what to do + write = overwrite_method(filename) # Ask what to do elif 
overwrite is True or (overwrite is False and not is_file): - write = True # Write the file + write = True # Write the file elif overwrite is False and is_file: - write = False # Don't write the file + write = False # Don't write the file else: raise ValueError("`overwrite` parameter can only be None, True or " "False.") diff --git a/hyperspy/io_plugins/__init__.py b/hyperspy/io_plugins/__init__.py index 2618118e16..921db9c1db 100644 --- a/hyperspy/io_plugins/__init__.py +++ b/hyperspy/io_plugins/__init__.py @@ -19,12 +19,14 @@ import logging -from hyperspy.io_plugins import (msa, digital_micrograph, fei, mrc, ripple, +from hyperspy.io_plugins import (msa, digital_micrograph, fei, mrc, mrcz, ripple, tiff, semper_unf, blockfile, dens, emd, - protochips, edax, bcf) + protochips, edax, bruker) + + +io_plugins = [msa, digital_micrograph, fei, mrc, mrcz, ripple, tiff, semper_unf, + blockfile, dens, emd, protochips, edax, bruker] -io_plugins = [msa, digital_micrograph, fei, mrc, ripple, tiff, semper_unf, - blockfile, dens, emd, protochips, edax, bcf] _logger = logging.getLogger(__name__) diff --git a/hyperspy/io_plugins/blockfile.py b/hyperspy/io_plugins/blockfile.py index 5c9a7c4f08..a26bef7314 100644 --- a/hyperspy/io_plugins/blockfile.py +++ b/hyperspy/io_plugins/blockfile.py @@ -38,7 +38,6 @@ # Recognised file extension file_extensions = ['blo', 'BLO'] default_extension = 0 - # Writing capabilities: writes = [(2, 2), (2, 1), (2, 0)] magics = [0x0102] @@ -107,23 +106,29 @@ def get_header_from_signal(signal, endianess='<'): note = signal.original_metadata['blockfile_header']['Note'] else: note = '' - if signal.axes_manager.navigation_dimension == 2: - NX, NY = signal.axes_manager.navigation_shape - SX = signal.axes_manager.navigation_axes[0].scale - SY = signal.axes_manager.navigation_axes[1].scale - elif signal.axes_manager.navigation_dimension == 1: - NX = signal.axes_manager.navigation_shape[0] + # The navigation and signal units are 'nm' and 'cm', respectively, so we + # convert the units accordingly before saving the signal + axes_manager = signal.axes_manager.deepcopy() + axes_manager.convert_units('navigation', 'nm') + axes_manager.convert_units('signal', 'cm') + + if axes_manager.navigation_dimension == 2: + NX, NY = axes_manager.navigation_shape + SX = axes_manager.navigation_axes[0].scale + SY = axes_manager.navigation_axes[1].scale + elif axes_manager.navigation_dimension == 1: + NX = axes_manager.navigation_shape[0] NY = 1 - SX = signal.axes_manager.navigation_axes[0].scale + SX = axes_manager.navigation_axes[0].scale SY = SX - elif signal.axes_manager.navigation_dimension == 0: + elif axes_manager.navigation_dimension == 0: NX = NY = SX = SY = 1 - DP_SZ = signal.axes_manager.signal_shape + DP_SZ = axes_manager.signal_shape if DP_SZ[0] != DP_SZ[1]: raise ValueError('Blockfiles require signal shape to be square!') DP_SZ = DP_SZ[0] - SDP = 100. / signal.axes_manager.signal_axes[0].scale + SDP = 100. / axes_manager.signal_axes[0].scale offset2 = NX * NY + header['Data_offset_1'] # Based on inspected files, the DPs are stored at 16-bit boundary... @@ -166,7 +171,7 @@ def file_reader(filename, endianess='<', mmap_mode=None, # It seems it uses "\x00" for padding, so we remove it try: header['Note'] = note.decode("latin1").strip("\x00") - except: + except BaseException: # Not sure about the encoding so, if it fails, we carry on _logger.warning( "Reading the Note metadata of this file failed. 
" diff --git a/hyperspy/io_plugins/bcf.py b/hyperspy/io_plugins/bruker.py similarity index 87% rename from hyperspy/io_plugins/bcf.py rename to hyperspy/io_plugins/bruker.py index 7545820cc4..c62aa30bc3 100644 --- a/hyperspy/io_plugins/bcf.py +++ b/hyperspy/io_plugins/bruker.py @@ -33,7 +33,7 @@ SEM/TEM (limited) parameters""" full_support = False # Recognised file extension -file_extensions = ('bcf',) +file_extensions = ('bcf', 'spx') default_extension = 0 # Reading capabilities reads_images = True @@ -57,6 +57,7 @@ import logging import re from math import ceil +from os.path import splitext _logger = logging.getLogger(__name__) @@ -76,7 +77,7 @@ # without minus sign, second group looks for numeric value with following # closing <\tag> (the '<' char); '([Ee]-?\d*)' part (optionally a third group) # checks for scientific notation (e.g. 8,843E-7 -> 'E-7'); -# compiled pattern is binary, as raw xml string is binary.: +# compiled pattern is binary, as raw xml string is binary.: fix_dec_patterns = re.compile(b'(>-?\\d+),(\\d*([Ee]-?\\d*)?<)') @@ -464,14 +465,16 @@ def dictionarize(t): for dc in map(dictionarize, children): for k, v in dc.items(): dd[k].append(v) - d = {t.tag: {k:interpret(v[0]) if len(v) == 1 else v for k, v in dd.items()}} + d = {t.tag: {k: interpret(v[0]) if len( + v) == 1 else v for k, v in dd.items()}} if t.attrib: - d[t.tag].update(('XmlClass' + k if list(t) else k, interpret(v)) for k, v in t.attrib.items()) + d[t.tag].update(('XmlClass' + k if list(t) else k, interpret(v)) + for k, v in t.attrib.items()) if t.text: text = t.text.strip() if children or t.attrib: if text: - d[t.tag]['#text'] = interpret(text) + d[t.tag]['#text'] = interpret(text) else: d[t.tag] = interpret(text) if 'ClassInstance' in d: @@ -524,12 +527,15 @@ def __init__(self, spectrum): # USED: self.hv = self.esma_metadata['PrimaryEnergy'] self.elev_angle = self.esma_metadata['ElevationAngle'] - + date_time = gen_iso_date_time(spectrum_header) + if date_time is not None: + self.date, self.time = date_time + # map stuff from spectra xml branch: self.spectrum_metadata = dictionarize(spectrum_header) self.offset = self.spectrum_metadata['CalibAbs'] self.scale = self.spectrum_metadata['CalibLin'] - + # main data: self.data = np.fromstring(spectrum.find('./Channels').text, dtype='Q', sep=",") @@ -573,15 +579,11 @@ def __init__(self, xml_str, indexes, instrument=None): self.name = 'Undefinded' _logger.info("hypermap have no name. 
Giving it 'Undefined' name") hd = root.find("./Header") - dt = datetime.strptime(' '.join([str(hd.find('./Date').text), - str(hd.find('./Time').text)]), - "%d.%m.%Y %H:%M:%S") - self.date = dt.date().isoformat() - self.time = dt.time().isoformat() + self.date, self.time = gen_iso_date_time(hd) self.version = int(hd.find('./FileVersion').text) # fill the sem and stage attributes: self._set_microscope(root) - self._get_mode(instrument) + self._set_mode(instrument) self._set_images(root) self.elements = {} self._set_elements(root) @@ -621,21 +623,11 @@ def _set_microscope(self, root): DSPConf = root.find("./ClassInstance[@Type='TRTDSPConfiguration']") self.dsp_metadata = dictionarize(DSPConf) - def _get_mode(self, instrument=None): - # where is no way to determine what kind of instrument was used: - # TEM or SEM (mode attribute) - hv = self.hv + def _set_mode(self, instrument=None): if instrument is not None: self.mode = instrument - elif hv > 30.0: # workaround to know if TEM or SEM - self.mode = 'TEM' else: - self.mode = 'SEM' - _logger.info( - "Guessing that the acquisition instrument is %s " % self.mode + - "because the beam energy is %i keV. If this is wrong, " % hv + - "please provide the right instrument using the 'instrument' " + - "keyword.") + self.mode = guess_mode(self.hv) def get_acq_instrument_dict(self, detector=False, **kwargs): """return python dictionary with aquisition instrument @@ -646,13 +638,9 @@ def get_acq_instrument_dict(self, detector=False, **kwargs): acq_inst['magnification'] = self.sem_metadata['Mag'] if detector: eds_metadata = self.get_spectra_metadata(**kwargs) - acq_inst['Detector'] = {'EDS': { - 'elevation_angle': eds_metadata.elev_angle, - 'detector_type': eds_metadata.detector_type, - 'real_time': self.calc_real_time()}} - if 'AzimutAngle' in eds_metadata.esma_metadata: - acq_inst['Detector']['EDS'][ - 'azimuth_angle'] = eds_metadata.esma_metadata['AzimutAngle'] + det = gen_detector_node(eds_metadata) + det['EDS']['real_time'] = self.calc_real_time() + acq_inst['Detector'] = det return acq_inst def _parse_image(self, xml_node, overview=False): @@ -737,8 +725,8 @@ def _set_elements(self, root): "./ClassInstance[@Type='TRTSpectrumRegion']"): tmp_d = dictionarize(j) self.elements[tmp_d['XmlClassName']] = {'line': tmp_d['Line'], - 'energy': tmp_d['Energy'], - 'width': tmp_d['Width']} + 'energy': tmp_d['Energy'], + 'width': tmp_d['Width']} except AttributeError: _logger.info('no element selection present in the spectra..') @@ -891,18 +879,19 @@ def __init__(self, filename, instrument=None): self.def_index = min(self.available_indexes) header_byte_str = header_file.get_as_BytesIO_string().getvalue() hd_bt_str = fix_dec_patterns.sub(b'\\1.\\2', header_byte_str) - self.header = HyperHeader(hd_bt_str, self.available_indexes, instrument=instrument) + self.header = HyperHeader( + hd_bt_str, self.available_indexes, instrument=instrument) self.hypermap = {} def check_index_valid(self, index): - """check and return if index is valid""" + """check and return if index is valid""" if type(index) != int: raise TypeError("provided index should be integer") if index not in self.available_indexes: raise IndexError("requisted index is not in the list of available indexes. 
" - "Available maps are under indexes: {0}".format(str(self.available_indexes))) + "Available maps are under indexes: {0}".format(str(self.available_indexes))) return index - + def parse_hypermap(self, index=None, downsample=1, cutoff_at_kV=None, lazy=False): @@ -964,6 +953,56 @@ def add_filename_to_general(self, item): item['metadata']['General']['original_filename'] = \ self.filename.split('/')[-1] +def spx_reader(filename, lazy=False): + with open(filename, 'br') as fn: + xml_str = fn.read() + root = ET.fromstring(xml_str) + sp_node = root.find("./ClassInstance[@Type='TRTSpectrum']") + try: + name = str(sp_node.attrib['Name']) + except KeyError: + name = 'Undefinded' + _logger.info("spectra have no name. Giving it 'Undefined' name") + spectrum = EDXSpectrum(sp_node) + mode = guess_mode(spectrum.hv) + results_xml = sp_node.find("./ClassInstance[@Type='TRTResult']") + elements_xml = sp_node.find("./ClassInstance[@Type='TRTPSEElementList']") + hy_spec = {'data': spectrum.data, + 'axes': [{'name': 'Energy', + 'size': len(spectrum.data), + 'offset': spectrum.offset, + 'scale': spectrum.scale, + 'units': 'keV'}], + 'metadata': + # where is no way to determine what kind of instrument was used: + # TEM or SEM + {'Acquisition_instrument': { + mode: {'Detector': + gen_detector_node(spectrum), + 'beam_energy': spectrum.hv} + }, + 'General': {'original_filename': filename.split('/')[-1], + 'title': 'EDX', + 'date': spectrum.date, + 'time': spectrum.time}, + 'Sample': {'name': name}, + 'Signal': {'signal_type': 'EDS_%s' % mode, + 'record_by': 'spectrum', + 'quantity': 'X-rays (Counts)'} + }, + 'original_metadata': {'Hardware': spectrum.hardware_metadata, + 'Detector': spectrum.detector_metadata, + 'Analysis': spectrum.esma_metadata, + 'Spectrum': spectrum.spectrum_metadata,} + } + if results_xml is not None: + hy_spec['original_metadata']['Results'] = dictionarize(results_xml) + if elements_xml is not None: + elem = dictionarize(elements_xml)['ChildClassInstances'] + hy_spec['original_metadata']['Selected_elements'] = elem + hy_spec['metadata']['Sample']['elements'] = elem['XmlClassName'] + return [hy_spec] + # dict of nibbles to struct notation for reading: st = {1: 'B', 2: 'B', 4: 'H', 8: 'I', 16: 'Q'} @@ -1143,15 +1182,22 @@ def py_parse_hypermap(virtual_file, shape, dtype, downsample=1): return vfa -# wrapper functions for hyperspy: -def file_reader(filename, select_type=None, index=None, downsample=1, # noqa - cutoff_at_kV=None, instrument=None, lazy=False): +def file_reader(filename, *args, **kwds): + ext = splitext(filename)[1][1:] + if ext == 'bcf': + return bcf_reader(filename, *args, **kwds) + elif ext == 'spx': + return spx_reader(filename, *args, **kwds) + + +def bcf_reader(filename, select_type=None, index=None, # noqa + downsample=1, cutoff_at_kV=None, instrument=None, lazy=False): """Reads a bruker bcf file and loads the data into the appropriate class, then wraps it into appropriate hyperspy required list of dictionaries used by hyperspy.api.load() method. Keyword arguments: - select_type -- One of: spectrum, image. If none specified, then function + select_type -- One of: spectrum_image, image. If none specified, then function loads everything, else if specified, loads either just sem imagery, or just hyper spectral mapping data (default None). 
index -- index of dataset in bcf v2 can be None integer and 'all' @@ -1168,9 +1214,17 @@ def file_reader(filename, select_type=None, index=None, downsample=1, # noqa # objectified bcf file: obj_bcf = BCF_reader(filename, instrument=instrument) + if select_type == 'spectrum': + select_type = 'spectrum_image' + from hyperspy.misc.utils import deprecation_warning + msg = ( + "The 'spectrum' option for the `select_type` parameter is " + "deprecated and will be removed in v2.0. Use 'spectrum_image' " + "instead.") + deprecation_warning(msg) if select_type == 'image': return bcf_images(obj_bcf) - elif select_type == 'spectrum': + elif select_type == 'spectrum_image': return bcf_hyperspectra(obj_bcf, index=index, downsample=downsample, cutoff_at_kV=cutoff_at_kV, @@ -1244,34 +1298,34 @@ def bcf_hyperspectra(obj_bcf, index=None, downsample=None, cutoff_at_kV=None, # 'scale': eds_metadata.scale, 'units': 'keV'}], 'metadata': - # where is no way to determine what kind of instrument was used: - # TEM or SEM - {'Acquisition_instrument': { - mode: obj_bcf.header.get_acq_instrument_dict( - detector=True, - index=index) - }, - 'General': {'original_filename': obj_bcf.filename.split('/')[-1], - 'title': 'EDX', - 'date': obj_bcf.header.date, - 'time': obj_bcf.header.time}, - 'Sample': {'name': obj_bcf.header.name, - 'elements': sorted(list(obj_bcf.header.elements)), - 'xray_lines': sorted(gen_elem_list(obj_bcf.header.elements))}, - 'Signal': {'signal_type': 'EDS_%s' % mode, - 'record_by': 'spectrum', - 'quantity': 'X-rays (Counts)'} - }, - 'original_metadata': {'Hardware': eds_metadata.hardware_metadata, - 'Detector': eds_metadata.detector_metadata, - 'Analysis': eds_metadata.esma_metadata, - 'Spectrum': eds_metadata.spectrum_metadata, - 'DSP Configuration': obj_bcf.header.dsp_metadata, - 'Line counter': obj_bcf.header.line_counter, - 'Stage': obj_bcf.header.stage_metadata, - 'Microscope': obj_bcf.header.sem_metadata}, - 'mapping': mapping, - }) + # where is no way to determine what kind of instrument was used: + # TEM or SEM + {'Acquisition_instrument': { + mode: obj_bcf.header.get_acq_instrument_dict( + detector=True, + index=index) + }, + 'General': {'original_filename': obj_bcf.filename.split('/')[-1], + 'title': 'EDX', + 'date': obj_bcf.header.date, + 'time': obj_bcf.header.time}, + 'Sample': {'name': obj_bcf.header.name, + 'elements': sorted(list(obj_bcf.header.elements)), + 'xray_lines': sorted(gen_elem_list(obj_bcf.header.elements))}, + 'Signal': {'signal_type': 'EDS_%s' % mode, + 'record_by': 'spectrum', + 'quantity': 'X-rays (Counts)'} + }, + 'original_metadata': {'Hardware': eds_metadata.hardware_metadata, + 'Detector': eds_metadata.detector_metadata, + 'Analysis': eds_metadata.esma_metadata, + 'Spectrum': eds_metadata.spectrum_metadata, + 'DSP Configuration': obj_bcf.header.dsp_metadata, + 'Line counter': obj_bcf.header.line_counter, + 'Stage': obj_bcf.header.stage_metadata, + 'Microscope': obj_bcf.header.sem_metadata}, + 'mapping': mapping, + }) return hyperspectra @@ -1305,3 +1359,39 @@ def get_mapping(mode): 'Stage.Z': ("Acquisition_instrument.%s.Stage.z" % mode, None), } + +def guess_mode(hv): + """there is no way to determine what kind of instrument + was used from metadata: TEM or SEM. + However simple guess can be made using the acceleration + voltage, assuming that SEM is <= 30kV or TEM is >30kV""" + if hv > 30.0: + mode = 'TEM' + else: + mode = 'SEM' + _logger.info( + "Guessing that the acquisition instrument is %s " % mode + + "because the beam energy is %i keV. 
If this is wrong, " % hv + + "please provide the right instrument using the 'instrument' " + + "keyword.") + return mode + +def gen_detector_node(spectrum): + eds_dict = {'EDS': {'elevation_angle': spectrum.elev_angle, + 'detector_type': spectrum.detector_type,}} + if 'AzimutAngle' in spectrum.esma_metadata: + eds_dict['EDS']['azimuth_angle'] = spectrum.esma_metadata['AzimutAngle'] + if 'RealTime' in spectrum.hardware_metadata: + eds_dict['EDS']['real_time'] = spectrum.hardware_metadata['RealTime'] / 1000 + eds_dict['EDS']['live_time'] = spectrum.hardware_metadata['LifeTime'] / 1000 + return eds_dict + +def gen_iso_date_time(node): + date_xml = node.find('./Date') + time_xml = node.find('./Time') + if date_xml is not None: + dt = datetime.strptime(' '.join([date_xml.text, time_xml.text]), + "%d.%m.%Y %H:%M:%S") + date = dt.date().isoformat() + time = dt.time().isoformat() + return date, time diff --git a/hyperspy/io_plugins/dens.py b/hyperspy/io_plugins/dens.py index 3f0f3c936c..22f97817cf 100644 --- a/hyperspy/io_plugins/dens.py +++ b/hyperspy/io_plugins/dens.py @@ -32,7 +32,6 @@ # Recognised file extension file_extensions = ['dens', 'DENS'] default_extension = 0 - # Writing capabilities writes = False diff --git a/hyperspy/io_plugins/digital_micrograph.py b/hyperspy/io_plugins/digital_micrograph.py index 4ebe7d4152..9580b57d86 100644 --- a/hyperspy/io_plugins/digital_micrograph.py +++ b/hyperspy/io_plugins/digital_micrograph.py @@ -46,7 +46,6 @@ # Recognised file extension file_extensions = ('dm3', 'DM3', 'dm4', 'DM4') default_extension = 0 - # Writing features writes = False # ---------------------- diff --git a/hyperspy/io_plugins/edax.py b/hyperspy/io_plugins/edax.py index e4f89ef4ea..29c2ef163f 100644 --- a/hyperspy/io_plugins/edax.py +++ b/hyperspy/io_plugins/edax.py @@ -25,7 +25,7 @@ import numpy as np from hyperspy.misc.array_tools import sarray2dict import traits.api as t -from hyperspy.misc.elements import elements_db +from hyperspy.misc.elements import atomic_number2name _logger = logging.getLogger(__name__) @@ -46,25 +46,12 @@ # Recognised file extension file_extensions = ['spd', 'SPD', 'spc', 'SPC'] default_extension = 0 - # Writing capabilities writes = False spd_extensions = ('spd', 'SPD', 'Spd') spc_extensions = ('spc', 'SPC', 'Spc') -# read dictionary of atomic numbers from HyperSpy, and add the elements that -# do not currently exist in the database (in case anyone is doing EDS on -# Ununpentium...) 
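The Bruker reader refactor above moves the TEM/SEM guess and the XML date/time parsing into module-level helpers shared by the bcf and spx paths. A minimal self-contained sketch of the two conversions (function names and values are illustrative, not the HyperSpy API):

```python
from datetime import datetime

def guess_mode_from_hv(hv_kv):
    # Acceleration voltages above 30 kV are taken to be TEM, otherwise SEM.
    return 'TEM' if hv_kv > 30.0 else 'SEM'

def iso_date_time(date_str, time_str):
    # Bruker XML stores the acquisition timestamp as e.g. '21.03.2017' and '13:45:07'.
    dt = datetime.strptime('%s %s' % (date_str, time_str), "%d.%m.%Y %H:%M:%S")
    return dt.date().isoformat(), dt.time().isoformat()

print(guess_mode_from_hv(200.0))                # 'TEM'
print(iso_date_time('21.03.2017', '13:45:07'))  # ('2017-03-21', '13:45:07')
```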
-atomic_num_dict = dict((p.General_properties.Z, e) for (e, p) in elements_db) -atomic_num_dict.update({93: 'Np', 94: 'Pu', 95: 'Am', 96: 'Cm', 97: 'Bk', - 98: 'Cf', 99: 'Es', 100: 'Fm', 101: 'Md', 102: 'No', - 103: 'Lr', 104: 'Rf', 105: 'Db', 106: 'Sg', 107: 'Bh', - 108: 'Hs', 109: 'Mt', 110: 'Ds', 111: 'Rg', 112: 'Cp', - 113: 'Uut', 114: 'Uuq', 115: 'Uup', 116: 'Uuh', - 117: 'Uus', - 118: 'Uuo'}) - def get_spd_dtype_list(endianess='<'): """ @@ -518,7 +505,7 @@ def get_spc_dtype_list(load_all=False, endianess='<', version=0.61): ('numZElements', end + 'i2'), # 20800 ('zAtoms', end + '48i2'), # 20802 ('zShells', end + '48i2'), # 20898 - ]) + ]) else: dtype_list = \ @@ -737,7 +724,7 @@ def _add_spc_metadata(metadata, spc_header): # Get elements stored in spectrum: num_elem = spc_header['numElem'] if num_elem > 0: - element_list = sorted([atomic_num_dict[i] for + element_list = sorted([atomic_number2name[i] for i in spc_header['at'][:num_elem]]) metadata['Sample'] = {'elements': element_list} _logger.info(" Elemental information found in the spectral metadata " @@ -818,7 +805,6 @@ def spc_reader(filename, def spd_reader(filename, endianess='<', - nav_units=None, spc_fname=None, ipr_fname=None, load_all_spc=False, @@ -832,10 +818,6 @@ def spd_reader(filename, Name of SPD file to read endianess : char Byte-order of data to read - nav_units : 'nm', 'um', or None - Default navigation units for EDAX data is in microns, so this is the - default unit to save in the signal. Can also be specified as 'nm', - which will output a signal with nm scale instead. spc_fname : None or str Name of file from which to read the spectral calibration. If data was exported fully from EDAX TEAM software, an .spc file with the @@ -846,14 +828,14 @@ def spd_reader(filename, ipr_fname : None or str Name of file from which to read the spatial calibration. If data was exported fully from EDAX TEAM software, an .ipr file with the - same name as the .spd (plus a \"_Img\" suffix) should be present. + same name as the .spd (plus a "_Img" suffix) should be present. If `None`, the default filename will be searched for. Otherwise, the name of the .ipr file to use for spatial calibration can be explicitly given as a string. load_all_spc : bool Switch to control if all of the .spc header is read, or just the important parts for import into HyperSpy - kwargs** + **kwargs Remaining arguments are passed to the Numpy ``memmap`` function Returns @@ -959,20 +941,13 @@ def spd_reader(filename, 'units': 'keV' if read_spc else t.Undefined, } - # Handle navigation units input: - scale = 1000 if nav_units == 'nm' else 1 - if nav_units is not 'nm': - if nav_units not in [None, 'um']: - _logger.warning("Did not understand nav_units input \"{}\". 
" - "Defaulting to microns.\n".format(nav_units)) - nav_units = r'$\mu m$' - + nav_units = 'µm' # Create navigation axes dictionaries: x_axis = { 'size': data.shape[1], 'index_in_array': 1, 'name': 'x', - 'scale': original_metadata['ipr_header']['mppX'] * scale if read_ipr + 'scale': original_metadata['ipr_header']['mppX'] if read_ipr else 1, 'offset': 0, 'units': nav_units if read_ipr else t.Undefined, @@ -982,7 +957,7 @@ def spd_reader(filename, 'size': data.shape[0], 'index_in_array': 0, 'name': 'y', - 'scale': original_metadata['ipr_header']['mppY'] * scale if read_ipr + 'scale': original_metadata['ipr_header']['mppY'] if read_ipr else 1, 'offset': 0, 'units': nav_units if read_ipr else t.Undefined, diff --git a/hyperspy/io_plugins/emd.py b/hyperspy/io_plugins/emd.py index 038a3fd960..f287f71961 100644 --- a/hyperspy/io_plugins/emd.py +++ b/hyperspy/io_plugins/emd.py @@ -18,30 +18,82 @@ # The EMD format is a hdf5 standard proposed at Lawrence Berkeley # National Lab (see http://emdatasets.com/ for more information). -# NOT to be confused with the FEI EMD format which was developed later. +# FEI later developed another EMD format, also based on the hdf5 standard. This +# reader first checked if the file have been saved by Velox (FEI EMD format) +# and use either the EMD class or the FEIEMDReader class to read the file. +# Writing file is only supported for EMD Berkeley file. import re +import json +import os +from datetime import datetime +import time +import logging +import traits.api as t + import h5py import numpy as np -from dask.array import from_array - -import logging +import dask.array as da +from dateutil import tz +import pint +from hyperspy.misc.elements import atomic_number2name +import hyperspy.misc.io.fei_stream_readers as stream_readers # Plugin characteristics # ---------------------- format_name = 'Electron Microscopy Data (EMD)' description = 'Read data from Berkeleys EMD files.' -full_support = True # Hopefully? +full_support = False # Hopefully? # Recognised file extension file_extensions = ('emd', 'EMD') default_extension = 0 +# Reading capabilities +reads_images = True +reads_spectrum = True +reads_spectrum_image = True # Writing features -writes = True +writes = True # Only Berkeley emd EMD_VERSION = '0.2' # ---------------------- +_logger = logging.getLogger(__name__) + + +def calculate_chunks(shape, dtype, chunk_size_mb=100): + """Calculate chunks to get target chunk size. + + The chunks are optimized for C-order reading speed. + + Parameters + ---------- + shape: tuple of ints + The shape of the array + dtype: string or numpy dtype + The dtype of the array + chunk_size_mb: int + The maximum size of the resulting chunks in MB. The default is + 100MB as reccommended by the dask documentation. + + """ + + target = chunk_size_mb * 1e6 + items = int(target // np.dtype(dtype).itemsize) + chunks = () + dimsize = np.cumprod(shape[::-1])[::-1][1:] + for i, ds in enumerate(dimsize): + chunk = items // ds + if not chunk: + chunks += (1,) + elif chunk <= shape[i]: + chunks += (chunk, ) + else: + chunks += (shape[i],) + # At least one signal + chunks += (shape[-1], ) + return chunks + class EMD(object): @@ -159,7 +211,7 @@ def _read_signal_from_group(self, name, group, lazy=False): # Extract essential data: data = group.get('data') if lazy: - data = from_array(data, chunks=data.chunks) + data = da.from_array(data, chunks=data.chunks) else: data = np.asanyarray(data) # EMD does not have a standard way to describe the signal axis. 
@@ -269,7 +321,7 @@ def add_signal(self, signal, name=None, metadata=None): self.signals[name] = signal @classmethod - def load_from_emd(cls, filename, lazy=False): + def load_from_emd(cls, filename, lazy=False, dataset_name=None): """Construct :class:`~.EMD` object from an emd-file. Parameters @@ -280,6 +332,12 @@ def load_from_emd(cls, filename, lazy=False): False : bool, optional If False (default) loads data to memory. If True, enables loading only if requested. + dataset_name : string or iterable, optional + Only add dataset with specific name. Note, this has to be the full + group path in the file. For example `/experimental/science_data'. + If the dataset is not found, an IOError with the possible + datasets will be raised. Several names can be specified + in the form of a list. Returns ------- @@ -318,16 +376,37 @@ def load_from_emd(cls, filename, lazy=False): 'sample', 'comments']: # Nodes which are not the data! if key in node_list: node_list.pop(node_list.index(key)) # Pop all unwanted nodes! - # One node should remain, the data node (named 'data', 'signals', - # 'experiments', ...)! - assert len(node_list) == 1, 'Dataset location is ambiguous!' - data_group = emd_file.get(node_list[0]) - if data_group is not None: - for name, group in data_group.items(): - if isinstance(group, h5py.Group): - if group.attrs.get('emd_group_type') == 1: - emd._read_signal_from_group( - name, group, lazy) + dataset_in_file_list = [] + for node in node_list: + data_group = emd_file.get(node) + if data_group is not None: + for group in data_group.values(): + name = group.name + if isinstance(group, h5py.Group): + if group.attrs.get('emd_group_type') == 1: + dataset_in_file_list.append(name) + if len(dataset_in_file_list) == 0: + raise IOError("No datasets found in {0}".format(filename)) + dataset_read_list = [] + if dataset_name is not None: + if isinstance(dataset_name, str): + dataset_name = [dataset_name] + + for temp_dataset_name in dataset_name: + if temp_dataset_name in dataset_in_file_list: + dataset_read_list.append(temp_dataset_name) + else: + raise IOError( + "Dataset with name {0} not found in the file. " + "Possible datasets are {1}.".format( + temp_dataset_name, + ', '.join(dataset_in_file_list))) + else: + dataset_read_list = dataset_in_file_list + for dataset_read in dataset_read_list: + group = emd_file[dataset_read] + emd._read_signal_from_group(dataset_read, group, lazy) + # Close file and return EMD object: if not lazy: emd_file.close() @@ -378,7 +457,7 @@ def save_to_emd(self, filename='datacollection.emd'): emd_file.close() def log_info(self): - """Print all relevant information about the EMD instance.""" + """( all relevant information about the EMD instance.""" self._log.debug('Calling log_info') pad_string0 = '-------------------------\n' pad_string1 = '\n-------------------------\n' @@ -404,14 +483,764 @@ def log_info(self): self._log.info(info_str) +def fei_check(filename): + """Function to check if the EMD file is an FEI file. + + Parameters + ---------- + filename : string + The name of the emd-file from which to load the signals. Standard + format is '*.emd'. 
+ + Returns + ------- + Boolean + + """ + with h5py.File(filename, 'r') as f: + if 'Version' in list(f.keys()): + version = f.get('Version') + v_dict = json.loads(version.value[0].decode('utf-8')) + if v_dict['format'] == 'Velox': + return True + + +def _get_keys_from_group(group): + # Return a list of ids of items contains in the group + return list(group.keys()) + + +def _parse_sub_data_group_metadata(sub_data_group): + metadata_array = sub_data_group['Metadata'][:, 0].T + mdata_string = metadata_array.tostring().decode("utf-8") + return json.loads(mdata_string.rstrip('\x00')) + + +def _parse_metadata(data_group, sub_group_key): + return _parse_sub_data_group_metadata(data_group[sub_group_key]) + + +def _parse_detector_name(original_metadata): + try: + name = original_metadata['BinaryResult']['Detector'] + except KeyError: + # if the `BinaryResult/Detector` is not available, there should be + # only one detector in `Detectors` + name = original_metadata['Detectors']['Detector-01']['DetectorName'] + return name + + +def _get_detector_metadata_dict(om, detector_name): + detectors_dict = om['Detectors'] + # find detector dict from the detector_name + for key in detectors_dict: + if detectors_dict[key]['DetectorName'] == detector_name: + return detectors_dict[key] + + +class FeiEMDReader(object): + """ + Class for reading FEI electron microscopy datasets. + + The :class:`~.FeiEMDReader` reads EMD files saved by the FEI Velox + software package. + + Attributes + ---------- + dictionaries: list + List of dictionaries which are passed to the file_reader. + im_type : string + String specifying whether the data is an image, spectrum or + spectrum image. + + """ + + def __init__(self, filename, select_type=None, first_frame=0, + last_frame=None, sum_frames=True, sum_EDS_detectors=True, + rebin_energy=1, SI_dtype=None, load_SI_image_stack=False, + lazy=False): + # TODO: Finish lazy implementation using the `FrameLocationTable` + # Parallelise streams reading + self.filename = filename + self.ureg = pint.UnitRegistry() + self.dictionaries = [] + self.first_frame = first_frame + self.last_frame = last_frame + self.sum_frames = sum_frames + self.sum_EDS_detectors = sum_EDS_detectors + self.rebin_energy = rebin_energy + self.SI_data_dtype = SI_dtype + self.load_SI_image_stack = load_SI_image_stack + self.lazy = lazy + self.detector_name = None + + self.original_metadata = {} + try: + f = h5py.File(filename, 'r') + self.d_grp = f.get('Data') + self._check_im_type() + self._parse_metadata_group(f.get('Operations'), 'Operations') + if self.im_type == 'SpectrumStream': + self.p_grp = f.get('Presentation') + self._parse_image_display() + self._read_data(select_type) + except Exception as e: + raise e + finally: + if not self.lazy: + f.close() + + def _read_data(self, select_type): + self.load_images = self.load_SI = self.load_single_spectrum = True + if select_type == 'single_spectrum': + self.load_images = self.load_SI = False + elif select_type == 'images': + self.load_SI = self.load_single_spectrum = False + elif select_type == 'spectrum_image': + self.load_images = self.load_single_spectrum = False + elif select_type is None: + pass + else: + raise ValueError("`select_type` parameter takes only: `None`, " + "'single_spectrum', 'images' or 'spectrum_image'.") + + if self.im_type == 'Image': + _logger.info('Reading the images.') + self._read_images() + elif self.im_type == 'Spectrum': + self._read_single_spectrum() + self._read_images() + elif self.im_type == 'SpectrumStream': + 
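Velox stores most metadata as null-padded, UTF-8 encoded JSON inside HDF5 byte arrays, which is what `_parse_sub_data_group_metadata` above decodes. A self-contained sketch of the round trip (the payload below is fabricated):

```python
import json
import numpy as np

def decode_velox_json(byte_column):
    """Decode a null-padded, UTF-8 encoded JSON byte array into a dict."""
    text = byte_column.tobytes().decode('utf-8')
    return json.loads(text.rstrip('\x00'))

payload = json.dumps({'BinaryResult': {'Detector': 'HAADF'}}).encode('utf-8')
column = np.frombuffer(payload + b'\x00' * 16, dtype=np.uint8)
print(decode_velox_json(column))   # {'BinaryResult': {'Detector': 'HAADF'}}
```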
self._read_single_spectrum() + _logger.info('Reading the spectrum image.') + t0 = time.time() + self._read_images() + t1 = time.time() + self._read_spectrum_stream() + t2 = time.time() + _logger.info('Time to load images: {} s.'.format(t1 - t0)) + _logger.info('Time to load spectrum image: {} s.'.format(t2 - t1)) + + def _check_im_type(self): + if 'Image' in self.d_grp: + if 'SpectrumImage' in self.d_grp: + self.im_type = 'SpectrumStream' + else: + self.im_type = 'Image' + else: + self.im_type = 'Spectrum' + + def _read_single_spectrum(self): + if not self.load_single_spectrum: + return + spectrum_grp = self.d_grp.get("Spectrum") + if spectrum_grp is None: + return # No spectra in the file + self.detector_name = 'EDS' + for spectrum_sub_group_key in _get_keys_from_group(spectrum_grp): + self.dictionaries.append( + self._read_spectrum(spectrum_grp, spectrum_sub_group_key)) + + def _read_spectrum(self, spectrum_group, spectrum_sub_group_key): + spectrum_sub_group = spectrum_group[spectrum_sub_group_key] + dataset = spectrum_sub_group['Data'] + if self.lazy: + data = da.from_array(dataset, chunks=dataset.chunks).T + else: + data = dataset[:].T + original_metadata = _parse_metadata(spectrum_group, + spectrum_sub_group_key) + original_metadata.update(self.original_metadata) + + dispersion, offset, unit = self._get_dispersion_offset( + original_metadata) + axes = [] + if len(data.shape) == 2: + if data.shape[0] == 1: + # squeeze + data = data[0, :] + else: + axes = [{ + 'name': 'Stack', + 'offset': 0, + 'scale': 1, + 'size': data.shape[0], + 'navigate': True, + } + ] + axes.append({ + 'name': 'Energy', + 'offset': offset, + 'scale': dispersion, + 'size': data.shape[-1], + 'units': 'keV', + 'navigate': False}, + ) + + md = self._get_metadata_dict(original_metadata) + md['Signal']['signal_type'] = 'EDS_TEM' + + return {'data': data, + 'axes': axes, + 'metadata': md, + 'original_metadata': original_metadata, + 'mapping': self._get_mapping()} + + def _read_images(self): + # We need to read the image to get the shape of the spectrum image + if not self.load_images and not self.load_SI: + return + # Get the image data group + image_group = self.d_grp.get("Image") + if image_group is None: + return # No images in the file + # Get all the subgroup of the image data group and read the image for + # each of them + for image_sub_group_key in _get_keys_from_group(image_group): + image = self._read_image(image_group, image_sub_group_key) + if not self.load_images: + # If we don't want to load the images, we stop here + return + self.dictionaries.append(image) + + def _read_image(self, image_group, image_sub_group_key): + """ Return a dictionary ready to parse of return to io module""" + image_sub_group = image_group[image_sub_group_key] + original_metadata = _parse_metadata(image_group, image_sub_group_key) + original_metadata.update(self.original_metadata) + if 'Detector' in original_metadata['BinaryResult'].keys(): + self.detector_name = _parse_detector_name(original_metadata) + + read_stack = (self.load_SI_image_stack or self.im_type == 'Image') + h5data = image_sub_group['Data'] + # Get the scanning area shape of the SI from the images + self.spatial_shape = h5data.shape[:-1] + # Set the axes in frame, y, x order + if self.lazy: + data = da.transpose( + da.from_array( + h5data, + chunks=h5data.chunks), + axes=[2, 0, 1]) + else: + # Workaround for a h5py bug https://github.com/h5py/h5py/issues/977 + # Change back to standard API once issue #977 is fixed. 
+ # Preallocate the numpy array and use read_direct method, which is + # much faster in case of chunked data. + data = np.empty(h5data.shape) + h5data.read_direct(data) + data = np.rollaxis(data, axis=2) + + pix_scale = original_metadata['BinaryResult'].get( + 'PixelSize', {'height': 1.0, 'width': 1.0}) + offsets = original_metadata['BinaryResult'].get( + 'Offset', {'x': 0.0, 'y': 0.0}) + original_units = original_metadata['BinaryResult'].get( + 'PixelUnitX', '') + + axes = [] + # stack of images + if not read_stack: + data = data[0:1, ...] + + if data.shape[0] == 1: + # Squeeze + data = data[0, ...] + i = 0 + else: + frame_time = original_metadata['Scan']['FrameTime'] + frame_time, time_unit = self._convert_scale_units( + frame_time, 's', 2 * data.shape[0]) + axes.append({'index_in_array': 0, + 'name': 'Time', + 'offset': 0, + 'scale': frame_time, + 'size': data.shape[0], + 'units': time_unit, + 'navigate': True}) + i = 1 + scale_x = self._convert_scale_units( + pix_scale['width'], original_units, data.shape[i + 1]) + scale_y = self._convert_scale_units( + pix_scale['height'], original_units, data.shape[i]) + offset_x = self._convert_scale_units( + offsets['x'], original_units, data.shape[i + 1]) + offset_y = self._convert_scale_units( + offsets['y'], original_units, data.shape[i]) + axes.extend([{'index_in_array': i, + 'name': 'y', + 'offset': offset_y[0], + 'scale': scale_y[0], + 'size': data.shape[i], + 'units': scale_y[1], + 'navigate': False}, + {'index_in_array': i + 1, + 'name': 'x', + 'offset': offset_x[0], + 'scale': scale_x[0], + 'size': data.shape[i + 1], + 'units': scale_x[1], + 'navigate': False} + ]) + + md = self._get_metadata_dict(original_metadata) + md['Signal']['signal_type'] = 'image' + if self.detector_name is not None: + original_metadata['DetectorMetadata'] = _get_detector_metadata_dict( + original_metadata, + self.detector_name) + if hasattr(self, 'map_label_dict'): + if image_sub_group_key in self.map_label_dict: + md['General']['title'] = self.map_label_dict[image_sub_group_key] + + return {'data': data, + 'axes': axes, + 'metadata': md, + 'original_metadata': original_metadata, + 'mapping': self._get_mapping(map_selected_element=False, + parse_individual_EDS_detector_metadata=False)} + + def _parse_frame_time(self, original_metadata, factor=1): + try: + frame_time = original_metadata['Scan']['FrameTime'] + time_unit = 's' + except KeyError: + frame_time, time_unit = None, t.Undefined + + frame_time, time_unit = self._convert_scale_units( + frame_time, time_unit, factor) + return frame_time, time_unit + + def _parse_image_display(self): + try: + image_display_group = self.p_grp.get('Displays/ImageDisplay') + key_list = _get_keys_from_group(image_display_group) + self.map_label_dict = {} + for key in key_list: + v = json.loads( + image_display_group[key].value[0].decode('utf-8')) + data_key = v['dataPath'].split('/')[-1] # key in data group + self.map_label_dict[data_key] = v['display']['label'] + except KeyError: + _logger.warning("The image label can't be read from the metadata.") + pass + + def _parse_metadata_group(self, group, group_name): + d = {} + try: + for group_key in _get_keys_from_group(group): + subgroup = group.get(group_key) + if hasattr(subgroup, 'keys'): + sub_dict = {} + for subgroup_key in _get_keys_from_group(subgroup): + v = json.loads( + subgroup[subgroup_key].value[0].decode('utf-8')) + sub_dict[subgroup_key] = v + else: + sub_dict = json.loads(subgroup.value[0].decode('utf-8')) + d[group_key] = sub_dict + except IndexError: + 
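The image reader above works around the slow fancy-indexing path for chunked datasets (h5py issue #977) by preallocating an array and using `read_direct`. A hedged sketch of the same pattern; the file name and group path are hypothetical:

```python
import h5py
import numpy as np

with h5py.File('velox_example.emd', 'r') as f:    # hypothetical file
    dset = f['Data/Image/00000001/Data']           # hypothetical group key
    data = np.empty(dset.shape, dtype=dset.dtype)
    dset.read_direct(data)                         # fast path for chunked datasets
data = np.rollaxis(data, axis=2)                   # frame axis first: (frame, y, x)
```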
_logger.warning("Some metadata can't be read.") + self.original_metadata.update({group_name: d}) + + def _read_spectrum_stream(self): + if not self.load_SI: + return + self.detector_name = 'EDS' + # Try to read the number of frames from Data/SpectrumImage + try: + sig = self.d_grp["SpectrumImage"] + self.number_of_frames = int( + json.loads( + sig[next(iter(sig))] + ["SpectrumImageSettings"][0].decode("utf8") + )["endFramePosition"]) + except Exception as e: + _logger.exception( + "Failed to read the number of frames from Data/SpectrumImage") + self.number_of_frames = None + if self.last_frame is None: + self.last_frame = self.number_of_frames + elif self.number_of_frames and self.last_frame > self.number_of_frames: + raise ValueError( + "The `last_frame` cannot be greater than" + " the number of frames, %i for this file." + % self.number_of_frames + ) + + spectrum_stream_group = self.d_grp.get("SpectrumStream") + if spectrum_stream_group is None: + _logger.warning("No spectrum stream is present in the file. It ", + "is possible that the file has been pruned: use ", + "Velox to read the spectrum image (proprietary " + "format). If you want to open FEI emd file with ", + "HyperSpy don't prune the file when saving it in " + "Velox.") + return + + def _read_stream(key): + stream = FeiSpectrumStream(spectrum_stream_group[key], self) + return stream + + subgroup_keys = _get_keys_from_group(spectrum_stream_group) + if self.sum_EDS_detectors: + if len(subgroup_keys) == 1: + _logger.warning("The file contains only one spectrum stream") + # Read the first stream + s0 = _read_stream(subgroup_keys[0]) + streams = [s0] + # add other stream streams + if len(subgroup_keys) > 1: + for key in subgroup_keys[1:]: + stream_data = spectrum_stream_group[key]['Data'][:].T[0] + if self.lazy: + s0.spectrum_image = ( + s0.spectrum_image + + s0.stream_to_sparse_array(stream_data=stream_data) + ) + else: + s0.stream_to_array(stream_data=stream_data, + spectrum_image=s0.spectrum_image) + else: + streams = [_read_stream(key) for key in subgroup_keys] + if self.lazy: + for stream in streams: + sa = stream.spectrum_image.astype(self.SI_data_dtype) + stream.spectrum_image = sa + + spectrum_image_shape = streams[0].shape + original_metadata = streams[0].original_metadata + original_metadata.update(self.original_metadata) + + pixel_size, offsets, original_units = \ + streams[0].get_pixelsize_offset_unit() + dispersion, offset, unit = self._get_dispersion_offset( + original_metadata) + + scale_x = self._convert_scale_units( + pixel_size['width'], original_units, spectrum_image_shape[1]) + scale_y = self._convert_scale_units( + pixel_size['height'], original_units, spectrum_image_shape[0]) + offset_x = self._convert_scale_units( + offsets['x'], original_units, spectrum_image_shape[1]) + offset_y = self._convert_scale_units( + offsets['y'], original_units, spectrum_image_shape[0]) + + i = 0 + axes = [] + # add a supplementary axes when we import all frames individualy + if not self.sum_frames: + frame_time, time_unit = self._parse_frame_time(original_metadata, + spectrum_image_shape[i]) + axes.append({'index_in_array': i, + 'name': 'Time', + 'offset': 0, + 'scale': frame_time, + 'size': spectrum_image_shape[i], + 'units': time_unit, + 'navigate': True}) + i = 1 + axes.extend([{'index_in_array': i, + 'name': 'y', + 'offset': offset_y[0], + 'scale': scale_y[0], + 'size': spectrum_image_shape[i], + 'units': scale_y[1], + 'navigate': True}, + {'index_in_array': i + 1, + 'name': 'x', + 'offset': offset_x[0], + 'scale': scale_x[0], 
+ 'size': spectrum_image_shape[i + 1], + 'units': scale_x[1], + 'navigate': True}, + {'index_in_array': i + 2, + 'name': 'X-ray energy', + 'offset': offset, + 'scale': dispersion, + 'size': spectrum_image_shape[i + 2], + 'units': unit, + 'navigate': False}]) + + md = self._get_metadata_dict(original_metadata) + md['Signal']['signal_type'] = 'EDS_TEM' + + for stream in streams: + original_metadata = stream.original_metadata + original_metadata.update(self.original_metadata) + self.dictionaries.append({'data': stream.spectrum_image, + 'axes': axes, + 'metadata': md, + 'original_metadata': original_metadata, + 'mapping': self._get_mapping( + parse_individual_EDS_detector_metadata=not self.sum_frames)}) + + def _get_dispersion_offset(self, original_metadata): + try: + for detectorname, detector in original_metadata['Detectors'].items( + ): + if original_metadata['BinaryResult']['Detector'] in detector['DetectorName']: + dispersion = float( + detector['Dispersion']) / 1000.0 * self.rebin_energy + offset = float( + detector['OffsetEnergy']) / 1000.0 + return dispersion, offset, 'keV' + except KeyError: + _logger.warning("The spectrum calibration can't be loaded.") + return 1, 0, t.Undefined + + def _convert_scale_units(self, value, units, factor=1): + if units == t.Undefined: + return value, units + factor /= 2 + v = np.float(value) * self.ureg(units) + converted_v = (factor * v).to_compact() + converted_value = float(converted_v.magnitude / factor) + converted_units = '{:~}'.format(converted_v.units) + return converted_value, converted_units + + def _get_metadata_dict(self, om): + meta_gen = {} + meta_gen['original_filename'] = os.path.split(self.filename)[1] + if self.detector_name is not None: + meta_gen['title'] = self.detector_name + # We have only one entry in the original_metadata, so we can't use + # the mapping of the original_metadata to set the date and time in + # the metadata: need to set it manually here + try: + if 'AcquisitionStartDatetime' in om['Acquisition'].keys(): + unix_time = om['Acquisition']['AcquisitionStartDatetime']['DateTime'] + # Workaround when the 'AcquisitionStartDatetime' key is missing + # This timestamp corresponds to when the data is stored + elif (not isinstance(om['CustomProperties'], str) and + 'Detectors[BM-Ceta].TimeStamp' in om['CustomProperties'].keys()): + unix_time = float( + om['CustomProperties']['Detectors[BM-Ceta].TimeStamp']['value']) / 1E6 + date, time = self._convert_datetime(unix_time).split('T') + meta_gen['date'] = date + meta_gen['time'] = time + meta_gen['time_zone'] = self._get_local_time_zone() + except (UnboundLocalError): + pass + + meta_sig = {} + meta_sig['signal_type'] = '' + + return {'General': meta_gen, 'Signal': meta_sig} + + def _get_mapping(self, map_selected_element=True, + parse_individual_EDS_detector_metadata=True): + mapping = { + 'Acquisition.AcquisitionStartDatetime.DateTime': ( + "General.time_zone", lambda x: self._get_local_time_zone()), + 'Optics.AccelerationVoltage': ( + "Acquisition_instrument.TEM.beam_energy", lambda x: float(x) / 1e3), + 'Optics.CameraLength': ( + "Acquisition_instrument.TEM.camera_length", lambda x: float(x) * 1e3), + 'CustomProperties.StemMagnification.value': ( + "Acquisition_instrument.TEM.magnification", lambda x: float(x)), + 'Instrument.InstrumentClass': ( + "Acquisition_instrument.TEM.microscope", None), + 'Stage.AlphaTilt': ( + "Acquisition_instrument.TEM.Stage.tilt_alpha", + lambda x: '{:.3f}'.format(np.degrees(float(x)))), + 'Stage.BetaTilt': ( + 
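`_convert_scale_units` above relies on pint's `to_compact()` to turn the SI base-unit calibrations stored by Velox into human-scale axis units. A minimal illustration (the value is made up):

```python
import pint

ureg = pint.UnitRegistry()
pixel_size = 2.5e-9 * ureg('m')         # Velox stores pixel sizes in metres
compact = pixel_size.to_compact()
print(compact)                           # 2.5 nanometer
print('{:~}'.format(compact.units))      # 'nm', the abbreviated unit string
```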
"Acquisition_instrument.TEM.Stage.tilt_beta", + lambda x: '{:.3f}'.format(np.degrees(float(x)))), + 'Stage.Position.x': ( + "Acquisition_instrument.TEM.Stage.x", + lambda x: '{:.6f}'.format(float(x)) + ), + 'Stage.Position.y': ( + "Acquisition_instrument.TEM.Stage.y", + lambda x: '{:.6f}'.format(float(x))), + 'Stage.Position.z': ( + "Acquisition_instrument.TEM.Stage.z", + lambda x: '{:.6f}'.format(float(x))), + 'ImportedDataParameter.Number_of_frames': ( + "Acquisition_instrument.TEM.Detector.EDS.number_of_frames", None), + 'DetectorMetadata.ElevationAngle': ( + "Acquisition_instrument.TEM.Detector.EDS.elevation_angle", + lambda x: '{:.3f}'.format(np.degrees(float(x)))), + 'DetectorMetadata.Gain': ( + "Signal.Noise_properties.Variance_linear_model.gain_factor", + lambda x: '{:.6f}'.format(float(x))), + 'DetectorMetadata.Offset': ( + "Signal.Noise_properties.Variance_linear_model.gain_offset", + lambda x: '{:.6f}'.format(float(x))), + } + + # Parse individual metadata for each EDS detector + if parse_individual_EDS_detector_metadata: + mapping.update({ + 'DetectorMetadata.AzimuthAngle': ( + "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle", + lambda x: '{:.3f}'.format(np.degrees(float(x)))), + 'DetectorMetadata.LiveTime': ( + "Acquisition_instrument.TEM.Detector.EDS.live_time", + lambda x: '{:.6f}'.format(float(x))), + 'DetectorMetadata.RealTime': ( + "Acquisition_instrument.TEM.Detector.EDS.real_time", + lambda x: '{:.6f}'.format(float(x))), + 'DetectorMetadata.DetectorName': ( + "General.title", None), + }) + + # Add selected element + if map_selected_element: + mapping.update({'Operations.ImageQuantificationOperation': ( + 'Sample.elements', + self._convert_element_list), + }) + + return mapping + + def _convert_element_list(self, d): + atomic_number_list = d[d.keys()[0]]['elementSelection'] + return [atomic_number2name[int(atomic_number)] + for atomic_number in atomic_number_list] + + def _convert_datetime(self, unix_time): + # Since we don't know the actual time zone of where the data have been + # acquired, we convert the datetime to the local time for convenience + dt = datetime.fromtimestamp(float(unix_time), tz=tz.tzutc()) + return dt.astimezone(tz.tzlocal()).isoformat().split('+')[0] + + def _get_local_time_zone(self): + return tz.tzlocal().tzname(datetime.today()) + + +# Below some information we have got from FEI about the format of the stream: +# +# The SI data is stored as a spectrum stream, ‘65535’ means next pixel +# (these markers are also called `Gate pulse`), other numbers mean a spectrum +# count in that bin for that pixel. +# For the size of the spectrum image and dispersion you have to look in +# AcquisitionSettings. +# The spectrum image cube itself stored in a compressed format, that is +# not easy to decode. + +class FeiSpectrumStream(object): + """Read spectrum image stored in FEI's stream format + + Once initialized, the instance of this class supports numpy style + indexing and slicing of the data stored in the stream format. 
+ """ + + def __init__(self, stream_group, reader): + self.reader = reader + self.stream_group = stream_group + # Parse acquisition settings to get bin_count and dtype + acquisition_settings_group = stream_group['AcquisitionSettings'] + acquisition_settings = json.loads( + acquisition_settings_group.value[0].decode('utf-8')) + self.bin_count = int(acquisition_settings['bincount']) + if self.bin_count % self.reader.rebin_energy != 0: + raise ValueError('The `rebin_energy` needs to be a divisor of the', + ' total number of channels.') + if self.reader.SI_data_dtype is None: + self.reader.SI_data_dtype = acquisition_settings['StreamEncoding'] + # Parse the rest of the metadata for storage + self.original_metadata = _parse_sub_data_group_metadata(stream_group) + # If last_frame is None, compute it + stream_data = self.stream_group['Data'][:].T[0] + if self.reader.last_frame is None: + # The information could not be retrieved from metadata + # we compute, which involves iterating once over the whole stream. + # This is required to support the `last_frame` feature without + # duplicating the functions as currently numba does not support + # parametetrization. + spatial_shape = self.reader.spatial_shape + last_frame = int( + np.ceil((stream_data == 65535).sum() / + (spatial_shape[0] * spatial_shape[1]))) + self.reader.last_frame = last_frame + self.reader.number_of_frames = last_frame + self.original_metadata['ImportedDataParameter'] = { + 'First_frame': self.reader.first_frame, + 'Last_frame': self.reader.last_frame, + 'Number_of_frames': self.reader.number_of_frames, + 'Rebin_energy': self.reader.rebin_energy, + 'Number_of_channels': self.bin_count, } + # Convert stream to spectrum image + if self.reader.lazy: + self.spectrum_image = self.stream_to_sparse_array( + stream_data=stream_data + ) + else: + self.spectrum_image = self.stream_to_array( + stream_data=stream_data + ) + + @property + def shape(self): + return self.spectrum_image.shape + + def get_pixelsize_offset_unit(self): + om_br = self.original_metadata['BinaryResult'] + return om_br['PixelSize'], om_br['Offset'], om_br['PixelUnitX'] + + def stream_to_sparse_array(self, stream_data): + """Convert stream in sparse array + + Parameters + ---------- + stream_data: array + + """ + # Here we load the stream data into memory, which is fine is the + # arrays are small. We could load them lazily when lazy. + stream_data = self.stream_group['Data'][:].T[0] + sparse_array = stream_readers.stream_to_sparse_COO_array( + stream_data=stream_data, + spatial_shape=self.reader.spatial_shape, + first_frame=self.reader.first_frame, + last_frame=self.reader.last_frame, + channels=self.bin_count, + sum_frames=self.reader.sum_frames, + rebin_energy=self.reader.rebin_energy, + ) + return sparse_array + + def stream_to_array(self, stream_data, spectrum_image=None): + """Convert stream to array. + + Parameters + ---------- + stream_data: array + spectrum_image: array or None + If array, the data from the stream are added to the array. + Otherwise it creates a new array and returns it. 
+ + """ + spectrum_image = stream_readers.stream_to_array( + stream=stream_data, + spatial_shape=self.reader.spatial_shape, + channels=self.bin_count, + first_frame=self.reader.first_frame, + last_frame=self.reader.last_frame, + rebin_energy=self.reader.rebin_energy, + sum_frames=self.reader.sum_frames, + spectrum_image=spectrum_image, + dtype=self.reader.SI_data_dtype, + ) + return spectrum_image + + def file_reader(filename, log_info=False, lazy=False, **kwds): - emd = EMD.load_from_emd(filename, lazy) - if log_info: - emd.log_info() dictionaries = [] - for signal in emd.signals.values(): - dictionaries.append(signal._to_dictionary()) + if fei_check(filename) == True: + _logger.debug('EMD is FEI format') + emd = FeiEMDReader(filename, lazy=lazy, **kwds) + dictionaries = emd.dictionaries + else: + emd = EMD.load_from_emd(filename, lazy, **kwds) + if log_info: + emd.log_info() + for signal in emd.signals.values(): + dictionaries.append(signal._to_dictionary()) + return dictionaries diff --git a/hyperspy/io_plugins/fei.py b/hyperspy/io_plugins/fei.py index 52517eb55c..0168052ac6 100644 --- a/hyperspy/io_plugins/fei.py +++ b/hyperspy/io_plugins/fei.py @@ -44,7 +44,6 @@ # Recognised file extension file_extensions = ser_extensions + emi_extensions default_extension = 0 - # Writing capabilities writes = False # ---------------------- @@ -566,8 +565,9 @@ def ser_reader(filename, objects=None, *args, **kwds): axis['units'] = 'nm' axis['scale'] *= 10 ** 9 elif axis['units'] == '1/meters': - axis['units'] = '1/nm' + axis['units'] = '1 / nm' axis['scale'] /= 10 ** 9 + # Remove Nones from array_shape caused by squeezing size 1 dimensions array_shape = [dim for dim in array_shape if dim is not None] lazy = kwds.pop('lazy', False) diff --git a/hyperspy/io_plugins/hspy.py b/hyperspy/io_plugins/hspy.py index a488e6c5c4..9fa4054862 100644 --- a/hyperspy/io_plugins/hspy.py +++ b/hyperspy/io_plugins/hspy.py @@ -37,12 +37,10 @@ format_name = 'HSPY' description = \ 'The default file format for HyperSpy based on the HDF5 standard' - full_support = False # Recognised file extension file_extensions = ['hspy', 'hdf5'] default_extension = 0 - # Writing capabilities writes = True version = "3.0" diff --git a/hyperspy/io_plugins/image.py b/hyperspy/io_plugins/image.py index a7f1392f25..f81e3d2d22 100644 --- a/hyperspy/io_plugins/image.py +++ b/hyperspy/io_plugins/image.py @@ -30,8 +30,6 @@ file_extensions = ['png', 'bmp', 'dib', 'gif', 'jpeg', 'jpe', 'jpg', 'msp', 'pcx', 'ppm', "pbm", "pgm", 'xbm', 'spi', ] default_extension = 0 # png - - # Writing features writes = [(2, 0), ] # ---------------------- diff --git a/hyperspy/io_plugins/mrc.py b/hyperspy/io_plugins/mrc.py index 5c66e9ebc3..d00a6f4a87 100644 --- a/hyperspy/io_plugins/mrc.py +++ b/hyperspy/io_plugins/mrc.py @@ -39,7 +39,6 @@ # Recognised file extension file_extensions = ['mrc', 'MRC', 'ALI', 'ali'] default_extension = 0 - # Writing capabilities writes = False diff --git a/hyperspy/io_plugins/mrcz.py b/hyperspy/io_plugins/mrcz.py new file mode 100644 index 0000000000..8bf59b073e --- /dev/null +++ b/hyperspy/io_plugins/mrcz.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- +# Copyright 2007-2015 The HyperSpy developers +# +# This file is part of HyperSpy. +# +# HyperSpy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# HyperSpy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HyperSpy. If not, see . + +import mrcz as _mrcz +import logging + + +_logger = logging.getLogger(__name__) +# Plugin characteristics +# ---------------------- +format_name = 'MRCZ' +description = 'Compressed MRC file format extension with blosc meta-compression' +full_support = False +# Recognised file extension +file_extensions = ['mrc', 'MRC', 'mrcz', 'MRCZ'] +default_extension = 2 +# Writing capabilities: +writes = True + + +_POP_FROM_HEADER = ['compressor', 'MRCtype', 'C3', 'dimensions', 'dtype', + 'extendedBytes', 'gain', 'maxImage', 'minImage', 'meanImage', + 'metaId', 'packedBytes', 'pixelsize', 'pixelunits', 'voltage'] +# Hyperspy uses an unusual mixed Fortran- and C-ordering scheme +_READ_ORDER = [1, 2, 0] +_WRITE_ORDER = [0, 2, 1] + + +mapping = { + 'mrcz_header.voltage': + ("Acquisition_instrument.TEM.beam_energy", + lambda x: x[0]), + 'mrcz_header.gain': + ("Signal.Noise_properties.Variance_linear_model.gain_factor", + lambda x: x[0]), + # There is no metadata field for spherical aberration + #'mrcz_header.C3': + #("Acquisition_instrument.TEM.C3", lambda x: x), +} + + +def file_reader(filename, endianess='<', lazy=False, mmap_mode='c', + **kwds): + _logger.debug("Reading MRCZ file: %s" % filename) + + if mmap_mode != 'c': + # Note also that MRCZ does not support memory-mapping of compressed data. + # Perhaps we could use the zarr package for that + raise ValueError('MRCZ supports only C-ordering memory-maps') + + mrcz_endian = 'le' if endianess == '<' else 'be' + data, mrcz_header = _mrcz.readMRC(filename, endian=mrcz_endian, + useMemmap=lazy, + pixelunits='nm', + **kwds) + + # Create the axis objects for each axis + names = ['y', 'x', 'z'] + navigate = [False, False, True] + axes = [{'size': data.shape[hsIndex], + 'index_in_array': hsIndex, + 'name': names[index], + 'scale': mrcz_header['pixelsize'][hsIndex], + 'offset': 0.0, + 'units': mrcz_header['pixelunits'], + 'navigate': nav} + for index, (hsIndex, nav) in enumerate(zip(_READ_ORDER, navigate))] + axes.insert(0, axes.pop(2)) # re-order the axes + + metadata = mrcz_header.copy() + # Remove non-standard fields + for popTarget in _POP_FROM_HEADER: + metadata.pop(popTarget) + + dictionary = {'data': data, + 'axes': axes, + 'metadata': metadata, + 'original_metadata': {'mrcz_header': mrcz_header}, + 'mapping': mapping, } + + return [dictionary, ] + + +def file_writer(filename, signal, do_async=False, compressor=None, clevel=1, + n_threads=None, **kwds): + import hyperspy.signals + if not isinstance(signal, + (hyperspy.signals.Signal2D, hyperspy.signals.ComplexSignal2D)): + raise TypeError("MRCZ supports 2D and 3D data only. 
type(signal) is " + "{}".format(type(signal))) + + endianess = kwds.pop('endianess', '<') + mrcz_endian = 'le' if endianess == '<' else 'be' + + meta = signal.metadata.as_dictionary() + + # Get pixelsize and pixelunits from the axes + pixelunits = signal.axes_manager[-1].units + + pixelsize = [signal.axes_manager[I].scale for I in _WRITE_ORDER] + + # Strip out voltage from meta-data + voltage = signal.metadata.get_item( + 'Acquisition_instrument.TEM.beam_energy') + # There aren't hyperspy fields for spherical aberration or detector gain + C3 = 0.0 + gain = signal.metadata.get_item("Signal.Noise_properties." + "Variance_linear_model.gain_factor", 1.0) + if do_async: + _mrcz.asyncWriteMRC(signal.data, filename, meta=meta, endian=mrcz_endian, + pixelsize=pixelsize, pixelunits=pixelunits, + voltage=voltage, C3=C3, gain=gain, + compressor=compressor, clevel=clevel, + n_threads=n_threads) + else: + _mrcz.writeMRC(signal.data, filename, meta=meta, endian=mrcz_endian, + pixelsize=pixelsize, pixelunits=pixelunits, + voltage=voltage, C3=C3, gain=gain, + compressor=compressor, clevel=clevel, + n_threads=n_threads) diff --git a/hyperspy/io_plugins/msa.py b/hyperspy/io_plugins/msa.py index 00cb7639b6..c2a887affd 100644 --- a/hyperspy/io_plugins/msa.py +++ b/hyperspy/io_plugins/msa.py @@ -39,7 +39,7 @@ full_support = False file_extensions = ('msa', 'ems', 'mas', 'emsa', 'EMS', 'MAS', 'EMSA', 'MSA') default_extension = 0 - +# Writing capabilities writes = [(1, 0), ] # ---------------------- @@ -198,13 +198,13 @@ def parse_msa_string(string, filename=None): if clean_par in keywords: try: parameters[parameter] = keywords[clean_par]['dtype'](value) - except: + except BaseException: # Normally the offending mispelling is a space in the scientic # notation, e.g. 2.0 E-06, so we try to correct for it try: parameters[parameter] = keywords[clean_par]['dtype']( value.replace(' ', '')) - except: + except BaseException: _logger.exception( "The %s keyword value, %s could not be converted to " "the right type", parameter, value) @@ -230,16 +230,16 @@ def parse_msa_string(string, filename=None): try: time = dt.strptime(parameters['TIME'], "%H:%M") mapped.set_item('General.time', time.time().isoformat()) - except: + except BaseException: if 'TIME' in parameters and parameters['TIME']: _logger.warning('The time information could not be retrieved') try: date = dt.strptime(parameters['DATE'], "%d-%b-%Y") mapped.set_item('General.date', date.date().isoformat()) - except: + except BaseException: if 'DATE' in parameters and parameters['DATE']: _logger.warning('The date information could not be retrieved') - except: + except BaseException: warnings.warn("I couldn't read the date information due to" "an unexpected error. Please report this error to " "the developers") @@ -335,7 +335,7 @@ def file_writer(filename, signal, format=None, separator=', ', if md.has_item("General.time"): time = dt.strptime(md.General.time, "%H:%M:%S") loc_kwds['TIME'] = time.strftime("%H:%M") - except: + except BaseException: warnings.warn( "I couldn't write the date information due to" "an unexpected error. 
Please report this error to " diff --git a/hyperspy/io_plugins/netcdf.py b/hyperspy/io_plugins/netcdf.py index d49952667e..ececfdab62 100644 --- a/hyperspy/io_plugins/netcdf.py +++ b/hyperspy/io_plugins/netcdf.py @@ -45,8 +45,6 @@ full_support = True file_extensions = ('nc', 'NC') default_extension = 0 - - # Writing features writes = False diff --git a/hyperspy/io_plugins/protochips.py b/hyperspy/io_plugins/protochips.py index 34cc5f143a..5bffc52955 100644 --- a/hyperspy/io_plugins/protochips.py +++ b/hyperspy/io_plugins/protochips.py @@ -60,7 +60,7 @@ def _protochips_log_reader(csv_file): for key in csv_file.logged_quantity_name_list: try: csvs.append(csv_file.get_dictionary(key)) - except: + except BaseException: raise IOError(invalid_file_error) return csvs @@ -138,7 +138,8 @@ def _get_metadata_time_axis(self): def _read_data(self, header_line_number): names = [name.replace(' ', '_') for name in self.column_name] # Necessary for numpy >= 1.14 - kwargs = {'encoding':'latin1'} if np.__version__ >= LooseVersion("1.14") else {} + kwargs = {'encoding': 'latin1'} if np.__version__ >= LooseVersion("1.14") else { + } data = np.genfromtxt(self.filename, delimiter=',', dtype=None, names=names, skip_header=header_line_number, diff --git a/hyperspy/io_plugins/ripple.py b/hyperspy/io_plugins/ripple.py index 46deb0b57f..e44035a3ff 100644 --- a/hyperspy/io_plugins/ripple.py +++ b/hyperspy/io_plugins/ripple.py @@ -25,10 +25,10 @@ import os.path from io import StringIO import logging +import traits.api as t import numpy as np -from hyperspy.misc.io.utils_readfile import * from hyperspy import Release from hyperspy.misc.utils import DictionaryTreeBrowser @@ -510,6 +510,8 @@ def file_reader(filename, rpl_info=None, encoding="latin-1", mp.set_item('Acquisition_instrument.TEM.Detector.EDS.live_time', rpl_info['live-time']) + units = [t.Undefined if unit == '' else unit for unit in units] + axes = [] index_in_array = 0 for i in range(3): diff --git a/hyperspy/io_plugins/tiff.py b/hyperspy/io_plugins/tiff.py index 1df234e439..d96ac0d95d 100644 --- a/hyperspy/io_plugins/tiff.py +++ b/hyperspy/io_plugins/tiff.py @@ -30,6 +30,8 @@ from hyperspy.misc.utils import DictionaryTreeBrowser _logger = logging.getLogger(__name__) + + # Plugin characteristics # ---------------------- format_name = 'TIFF' @@ -38,12 +40,11 @@ full_support = False file_extensions = ['tif', 'tiff'] default_extension = 0 # tif - - # Writing features writes = [(2, 0), (2, 1)] # ---------------------- + axes_label_codes = { 'X': "width", 'Y': "height", @@ -63,6 +64,7 @@ 'Q': t.Undefined, '_': t.Undefined} + ureg = pint.UnitRegistry() @@ -77,8 +79,6 @@ def file_writer(filename, signal, export_scale=True, extratags=[], **kwds): default: True Export the scale and the units (compatible with DM and ImageJ) to appropriate tags. - If the scikit-image version is too old, use the hyperspy embedded - tifffile library to allow exporting the scale and the unit. 
""" _logger.debug('************* Saving *************') data = signal.data @@ -201,7 +201,7 @@ def file_reader(filename, record_by='image', force_read_resolution=False, elif name in ['depth', 'image series', 'time']: scales[i], units[i] = scales_d['z'], units_d['z'] offsets[i] = offsets_d['z'] - except: + except BaseException: _logger.info("Scale and units could not be imported") axes = [{'size': size, @@ -272,6 +272,27 @@ def _parse_scale_unit(tiff, op, shape, force_read_resolution): units = {axis: t.Undefined for axis in axes_l} intensity_axis = {} + # for files created with imageJ + if tiff[0].is_imagej: + image_description = _decode_string(op["image_description"]) + if "image_description_1" in op: + image_description = _decode_string(op["image_description_1"]) + _logger.debug( + "Image_description tag: {0}".format(image_description)) + if 'ImageJ' in image_description: + _logger.debug("Reading ImageJ tif metadata") + # ImageJ write the unit in the image description + if 'unit' in image_description: + unit = image_description.split('unit=')[1].splitlines()[0] + if unit == 'micron': + unit = 'µm' + for key in ['x', 'y']: + units[key] = unit + scales['x'], scales['y'] = _get_scales_from_x_y_resolution(op) + if 'spacing' in image_description: + scales['z'] = float( + image_description.split('spacing=')[1].splitlines()[0]) + # for files created with DM if '65003' in op: _logger.debug("Reading Gatan DigitalMicrograph tif metadata") @@ -300,25 +321,6 @@ def _parse_scale_unit(tiff, op, shape, force_read_resolution): if '65025' in op: intensity_axis['scale'] = op['65025'] # intensity scale - # for files created with imageJ - if tiff[0].is_imagej: - image_description = _decode_string(op["image_description"]) - if "image_description_1" in op: - image_description = _decode_string(op["image_description_1"]) - _logger.debug( - "Image_description tag: {0}".format(image_description)) - if 'ImageJ' in image_description: - _logger.debug("Reading ImageJ tif metadata") - # ImageJ write the unit in the image description - if 'unit' in image_description: - unit = image_description.split('unit=')[1].splitlines()[0] - for key in ['x', 'y']: - units[key] = unit - scales['x'], scales['y'] = _get_scales_from_x_y_resolution(op) - if 'spacing' in image_description: - scales['z'] = float( - image_description.split('spacing=')[1].splitlines()[0]) - # for FEI Helios tiff files (apparently, works also for Quanta): elif 'helios_metadata' in op or 'sfeg_metadata' in op: _logger.debug("Reading FEI tif metadata") @@ -428,7 +430,7 @@ def _get_dm_kwargs_extratag(signal, scales, units, offsets): if md.has_item('Signal.quantity'): try: intensity_units = md.Signal.quantity - except: + except BaseException: _logger.info("The units of the 'intensity axes' couldn't be" "retrieved, please report the bug.") intensity_units = "" @@ -439,7 +441,7 @@ def _get_dm_kwargs_extratag(signal, scales, units, offsets): dic = md.Signal.Noise_properties.Variance_linear_model intensity_offset = dic.gain_offset intensity_scale = dic.gain_factor - except: + except BaseException: _logger.info("The scale or the offset of the 'intensity axes'" "couldn't be retrieved, please report the bug.") intensity_offset = 0.0 @@ -489,7 +491,7 @@ def _imagej_description(version='1.11a', **kwargs): def _decode_string(string): try: string = string.decode('utf8') - except: + except BaseException: # Sometimes the strings are encoded in latin-1 instead of utf8 string = string.decode('latin-1', errors='ignore') return string diff --git a/hyperspy/misc/array_tools.py 
b/hyperspy/misc/array_tools.py index 320158a60e..886d0a3098 100644 --- a/hyperspy/misc/array_tools.py +++ b/hyperspy/misc/array_tools.py @@ -24,6 +24,7 @@ import numpy as np from hyperspy.misc.math_tools import anyfloatin +from hyperspy.decorators import jit_ifnumba _logger = logging.getLogger(__name__) @@ -207,15 +208,7 @@ def rebin(a, new_shape=None, scale=None, crop=True): " error") -def jit_ifnumba(func): - try: - import numba - return numba.jit(func, nopython=True) - except ImportError: - return func - - -@jit_ifnumba +@jit_ifnumba() def _linear_bin_loop(result, data, scale): for j in range(result.shape[0]): # Begin by determining the upper and lower limits of a given new pixel. diff --git a/hyperspy/misc/eels/tools.py b/hyperspy/misc/eels/tools.py index 375b825435..d8c5634c95 100644 --- a/hyperspy/misc/eels/tools.py +++ b/hyperspy/misc/eels/tools.py @@ -249,14 +249,14 @@ def eels_constant(s, zlp, t): # Mapped parameters try: e0 = s.metadata.Acquisition_instrument.TEM.beam_energy - except: + except BaseException: raise AttributeError("Please define the beam energy." "You can do this e.g. by using the " "set_microscope_parameters method") try: beta = s.metadata.Acquisition_instrument.\ TEM.Detector.EELS.collection_angle - except: + except BaseException: raise AttributeError("Please define the collection semi-angle." "You can do this e.g. by using the " "set_microscope_parameters method") @@ -273,18 +273,20 @@ def eels_constant(s, zlp, t): if zlp.axes_manager.signal_dimension == 0: i0 = zlp.data else: - i0 = zlp.data.sum(axis.index_in_array) + i0 = zlp.integrate1D(axis.index_in_axes_manager).data else: raise ValueError('The ZLP signal dimensions are not ' 'compatible with the dimensions of the ' 'low-loss signal') - i0 = i0.reshape( - np.insert(i0.shape, axis.index_in_array, 1)) + # The following prevents errors if the signal is a single spectrum + if len(i0) != 1: + i0 = i0.reshape( + np.insert(i0.shape, axis.index_in_array, 1)) elif isinstance(zlp, numbers.Number): i0 = zlp else: - raise ValueError('The zero-loss peak input must be a Hyperspy signal\ - or a number.') + raise ValueError('The zero-loss peak input is not valid, it must be\ + in the BaseSignal class or a Number.') if isinstance(t, hyperspy.signal.BaseSignal): if (t.axes_manager.navigation_dimension == diff --git a/hyperspy/misc/elements.py b/hyperspy/misc/elements.py index 4753bb9f06..d5ce6ef2ab 100644 --- a/hyperspy/misc/elements.py +++ b/hyperspy/misc/elements.py @@ -3581,3 +3581,16 @@ 'name': 'molybdenum'}}} elements_db = utils.DictionaryTreeBrowser(elements) + +# read dictionary of atomic numbers from HyperSpy, and add the elements that +# do not currently exist in the database (in case anyone is doing EDS on +# Ununpentium...) 
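`jit_ifnumba` now lives in `hyperspy.decorators` and is invoked as a decorator factory (`@jit_ifnumba()`). A minimal sketch of such a factory, assuming it simply falls back to the plain Python function when numba is not installed:

```python
import numpy as np

def jit_ifnumba(**jit_kwargs):
    def wrap(func):
        try:
            import numba
            jit_kwargs.setdefault('nopython', True)
            return numba.jit(func, **jit_kwargs)
        except ImportError:
            # numba missing: run the pure Python implementation unchanged
            return func
    return wrap

@jit_ifnumba()
def scaled_sum(values, scale):
    total = 0.0
    for v in values:
        total += v * scale
    return total

print(scaled_sum(np.arange(5.0), 2.0))   # 20.0
```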
+atomic_number2name = dict((p.General_properties.Z, e) + for (e, p) in elements_db) +atomic_number2name.update({93: 'Np', 94: 'Pu', 95: 'Am', 96: 'Cm', 97: 'Bk', + 98: 'Cf', 99: 'Es', 100: 'Fm', 101: 'Md', 102: 'No', + 103: 'Lr', 104: 'Rf', 105: 'Db', 106: 'Sg', + 107: 'Bh', 108: 'Hs', 109: 'Mt', 110: 'Ds', + 111: 'Rg', 112: 'Cp', 113: 'Uut', 114: 'Uuq', + 115: 'Uup', 116: 'Uuh', 117: 'Uus', 118: 'Uuo', + 119: 'Uue'}) diff --git a/hyperspy/misc/holography/reconstruct.py b/hyperspy/misc/holography/reconstruct.py index 12de2414e3..7fcdebaa14 100644 --- a/hyperspy/misc/holography/reconstruct.py +++ b/hyperspy/misc/holography/reconstruct.py @@ -25,7 +25,7 @@ def estimate_sideband_position( - holo_data, holo_sampling, central_band_mask_radius=None, sb='lower'): + holo_data, holo_sampling, central_band_mask_radius=None, sb='lower', high_cf=True): """ Finds the position of the sideband and returns its position. @@ -38,44 +38,57 @@ def estimate_sideband_position( central_band_mask_radius: float, optional The aperture radius used to mask out the centerband. sb : str, optional - Chooses which sideband is taken. 'lower' or 'upper' + Chooses which sideband is taken. 'lower', 'upper', 'left', or 'right'. + high_cf : bool, optional + If False, the highest carrier frequency allowed for the sideband location is equal to + half of the Nyquist frequency (Default: True). Returns ------- Tuple of the sideband position (y, x), referred to the unshifted FFT. """ - sb_position = (0, 0) - f_freq = freq_array(holo_data.shape, holo_sampling) - - # If aperture radius of centerband is not given, it will be set to 5 % of the Nyquist - # frequency. + # If aperture radius of centerband is not given, it will be set to 5 % of + # the Nyquist frequ.: if central_band_mask_radius is None: - central_band_mask_radius = 1 / 20. * np.max(f_freq) - + central_band_mask_radius = 0.05 * np.max(f_freq) # A small aperture masking out the centerband. 
- aperture_central_band = np.subtract( - 1.0, aperture_function( - f_freq, central_band_mask_radius, 1e-6)) # 1e-6 - # imitates 0 - + ap_cb = 1.0 - aperture_function(f_freq, central_band_mask_radius, 1e-6) + if not high_cf: # Cut out higher frequencies, if necessary: + ap_cb *= aperture_function(f_freq, + np.max(f_freq) / (2 * np.sqrt(2)), + 1e-6) + # Imitates 0: fft_holo = fft2(holo_data) / np.prod(holo_data.shape) - fft_filtered = fft_holo * aperture_central_band - + fft_filtered = fft_holo * ap_cb # Sideband position in pixels referred to unshifted FFT + cb_position = ( + fft_filtered.shape[0] // + 2, + fft_filtered.shape[1] // + 2) # cb: center band if sb == 'lower': - fft_sb = fft_filtered[:int(fft_filtered.shape[0] / 2), :] + fft_sb = np.abs(fft_filtered[:cb_position[0], :]) sb_position = np.asarray( np.unravel_index( fft_sb.argmax(), fft_sb.shape)) elif sb == 'upper': - fft_sb = fft_filtered[int(fft_filtered.shape[0] / 2):, :] + fft_sb = np.abs(fft_filtered[cb_position[0]:, :]) sb_position = (np.unravel_index(fft_sb.argmax(), fft_sb.shape)) + sb_position = np.asarray(np.add(sb_position, (cb_position[0], 0))) + elif sb == 'left': + fft_sb = np.abs(fft_filtered[:, :cb_position[1]]) sb_position = np.asarray( - np.add(sb_position, (int(fft_filtered.shape[0] / 2), 0))) - + np.unravel_index( + fft_sb.argmax(), + fft_sb.shape)) + elif sb == 'right': + fft_sb = np.abs(fft_filtered[:, cb_position[1]:]) + sb_position = (np.unravel_index(fft_sb.argmax(), fft_sb.shape)) + sb_position = np.asarray(np.add(sb_position, (0, cb_position[1]))) + # Return sideband position: return sb_position @@ -106,8 +119,8 @@ def estimate_sideband_size(sb_position, holo_shape, sb_size_ratio=0.5): return np.min(np.linalg.norm(h, axis=1)) -def reconstruct(holo_data, holo_sampling, sb_size, sb_position, sb_smoothness, output_shape=None, - plotting=False): +def reconstruct(holo_data, holo_sampling, sb_size, sb_position, sb_smoothness, + output_shape=None, plotting=False): """Core function for holographic reconstruction. Parameters diff --git a/hyperspy/misc/holography/tools.py b/hyperspy/misc/holography/tools.py new file mode 100644 index 0000000000..704b71f3af --- /dev/null +++ b/hyperspy/misc/holography/tools.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Copyright 2007-2017 The HyperSpy developers +# +# This file is part of HyperSpy. +# +# HyperSpy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HyperSpy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HyperSpy. If not, see . + +import numpy as np +import matplotlib.pyplot as plt +from scipy.fftpack import fft2, fftshift +import logging + +_logger = logging.getLogger(__name__) + + +def calculate_carrier_frequency(holo_data, sb_position, scale): + """ + Calculates fringe carrier frequency of a hologram + + Parameters + ---------- + holo_data: ndarray + The data of the hologram. + sb_position: tuple + Position of the sideband with the reference to non-shifted FFT + scale: tuple + Scale of the axes that will be used for the calculation. 
+ + Returns + ------- + Carrier frequency + """ + + shape = holo_data.shape + origins = [np.array((0, 0)), + np.array((0, shape[1])), + np.array((shape[0], shape[1])), + np.array((shape[0], 0))] + origin_index = np.argmin( + [np.linalg.norm(origin - sb_position) for origin in origins]) + return np.linalg.norm(np.multiply( + origins[origin_index] - sb_position, scale)) + + +def estimate_fringe_contrast_fourier( + holo_data, sb_position, apodization='hanning'): + """ + Estimates average fringe contrast of a hologram by dividing amplitude + of maximum pixel of sideband by amplitude of FFT's origin. + + Parameters + ---------- + holo_data: ndarray + The data of the hologram. + sb_position: tuple + Position of the sideband with the reference to non-shifted FFT + apodization: string, None + Use 'hanning', 'hamming' or None to apply apodization window in real space before FFT + Apodization is typically needed to suppress the striking due to sharp edges + of the which often results in underestimation of the fringe contrast. (Default: 'hanning') + + Returns + ------- + Fringe contrast as a float + """ + + holo_shape = holo_data.shape + + if apodization: + if apodization == 'hanning': + window_x = np.hanning(holo_shape[0]) + window_y = np.hanning(holo_shape[1]) + elif apodization == 'hamming': + window_x = np.hamming(holo_shape[0]) + window_y = np.hamming(holo_shape[1]) + window_2d = np.sqrt(np.outer(window_x, window_y)) + data = holo_data * window_2d + else: + data = holo_data + + fft_exp = fft2(data) + + return 2 * np.abs(fft_exp[tuple(sb_position)]) / np.abs(fft_exp[0, 0]) diff --git a/hyperspy/misc/io/fei_stream_readers.py b/hyperspy/misc/io/fei_stream_readers.py new file mode 100644 index 0000000000..7ea3b56ee2 --- /dev/null +++ b/hyperspy/misc/io/fei_stream_readers.py @@ -0,0 +1,356 @@ +import numpy as np +import dask.array as da + +from hyperspy.decorators import jit_ifnumba + + +try: + import sparse + sparse_installed = True + + class DenseSliceCOO(sparse.COO): + """Just like sparse.COO, but returning a dense array on indexing/slicing""" + + def __getitem__(self, *args, **kwargs): + obj = super().__getitem__(*args, **kwargs) + try: + return obj.todense() + except AttributeError: + # Indexing, unlike slicing, returns directly the content + return obj +except ImportError: + sparse_installed = False + + +@jit_ifnumba() +def _stream_to_sparse_COO_array_sum_frames( + stream_data, last_frame, shape, channels, rebin_energy=1, first_frame=0): + navigation_index = 0 + frame_number = 0 + ysize, xsize = shape + frame_size = xsize * ysize + data_list = [] + coords_list = [] + data = 0 + count_channel = None + for value in stream_data: + if frame_number < first_frame: + if value != 65535: # Same spectrum + continue + else: + navigation_index += 1 + if navigation_index == frame_size: + frame_number += 1 + navigation_index = 0 + continue + # when we reach the end of the frame, reset the navigation index to 0 + if navigation_index == frame_size: + navigation_index = 0 + frame_number += 1 + if frame_number == last_frame: + break + # if different of ‘65535’, add a count to the corresponding channel + if value != 65535: # Same spectrum + if data: + if value == count_channel: # Same channel, add a count + data += 1 + else: # a new channel, same spectrum—requires new coord + # Store previous channel + coords_list.append(( + int(navigation_index // xsize), + int(navigation_index % xsize), + int(count_channel // rebin_energy)) + ) + data_list.append(data) + # Add a count to new channel + data = 1 + # Update count 
channel as this is a new channel + count_channel = value + + else: # First non-zero channel of spectrum + data = 1 + # Update count channel as this is a new channel + count_channel = value + + else: # Advances one pixel + if data: # Only store coordinates if the spectrum was not empty + coords_list.append(( + int(navigation_index // xsize), + int(navigation_index % xsize), + int(count_channel // rebin_energy)) + ) + data_list.append(data) + navigation_index += 1 + data = 0 + + # Store data at the end if any (there is no final 65535 to mark the end + # of the stream) + if data: # Only store coordinates if the spectrum was not empty + coords_list.append(( + int(navigation_index // xsize), + int(navigation_index % xsize), + int(count_channel // rebin_energy)) + ) + data_list.append(data) + + final_shape = (ysize, xsize, channels // rebin_energy) + coords = np.array(coords_list).T + data = np.array(data_list) + return coords, data, final_shape + + +@jit_ifnumba() +def _stream_to_sparse_COO_array( + stream_data, last_frame, shape, channels, rebin_energy=1, first_frame=0): + navigation_index = 0 + frame_number = 0 + ysize, xsize = shape + frame_size = xsize * ysize + data_list = [] + coords = [] + data = 0 + count_channel = None + for value in stream_data: + if frame_number < first_frame: + if value != 65535: # Same spectrum + continue + else: + navigation_index += 1 + if navigation_index == frame_size: + frame_number += 1 + navigation_index = 0 + continue + # when we reach the end of the frame, reset the navigation index to 0 + if navigation_index == frame_size: + navigation_index = 0 + frame_number += 1 + if frame_number == last_frame: + break + # if different of ‘65535’, add a count to the corresponding channel + if value != 65535: # Same spectrum + if data: + if value == count_channel: # Same channel, add a count + data += 1 + else: # a new channel, same spectrum—requires new coord + # Store previous channel + coords.append(( + frame_number - first_frame, + int(navigation_index // xsize), + int(navigation_index % xsize), + int(count_channel // rebin_energy)) + ) + data_list.append(data) + # Add a count to new channel + data = 1 + # Update count channel as this is a new channel + count_channel = value + + else: # First non-zero channel of spectrum + data = 1 + # Update count channel as this is a new channel + count_channel = value + + else: # Advances one pixel + if data: # Only store coordinates if the spectrum was not empty + coords.append(( + frame_number - first_frame, + int(navigation_index // xsize), + int(navigation_index % xsize), + int(count_channel // rebin_energy)) + ) + data_list.append(data) + navigation_index += 1 + data = 0 + + # Store data at the end if any (there is no final 65535 to mark the end of + # the stream) + if data: # Only store coordinates if the spectrum was not empty + coords.append(( + frame_number - first_frame, + int(navigation_index // xsize), + int(navigation_index % xsize), + int(count_channel // rebin_energy)) + ) + data_list.append(data) + + final_shape = (last_frame - first_frame, ysize, xsize, + channels // rebin_energy) + coords = np.array(coords).T + data = np.array(data_list) + return coords, data, final_shape + + +def stream_to_sparse_COO_array( + stream_data, spatial_shape, channels, last_frame, rebin_energy=1, + sum_frames=True, first_frame=0, ): + """Returns data stored in a FEI stream as a nd COO array + + Parameters + ---------- + stream_data: numpy array + spatial_shape: tuple of ints + (ysize, xsize) + channels: ints + Number of channels in 
the spectrum + rebin_energy: int + Rebin the spectra. The default is 1 (no rebinning applied) + sum_frames: bool + If True, sum all the frames + + """ + if not sparse_installed: + raise ImportError( + "The python-sparse package is not installed and it is required " + "for lazy loading of SIs stored in FEI EMD stream format." + ) + if sum_frames: + coords, data, shape = _stream_to_sparse_COO_array_sum_frames( + stream_data=stream_data, + shape=spatial_shape, + channels=channels, + rebin_energy=rebin_energy, + first_frame=first_frame, + last_frame=last_frame, + ) + else: + coords, data, shape = _stream_to_sparse_COO_array( + stream_data=stream_data, + shape=spatial_shape, + channels=channels, + rebin_energy=rebin_energy, + first_frame=first_frame, + last_frame=last_frame, + ) + dense_sparse = DenseSliceCOO(coords=coords, data=data, shape=shape) + dask_sparse = da.from_array(dense_sparse, chunks="auto") + return dask_sparse + + +@jit_ifnumba() +def _fill_array_with_stream_sum_frames(spectrum_image, stream, + first_frame, last_frame, rebin_energy=1): + # jit speeds up this function by a factor of ~ 30 + navigation_index = 0 + frame_number = 0 + shape = spectrum_image.shape + for count_channel in np.nditer(stream): + # when we reach the end of the frame, reset the navigation index to 0 + if navigation_index == (shape[0] * shape[1]): + navigation_index = 0 + frame_number += 1 + # break the for loop when we reach the last frame we want to read + if frame_number == last_frame: + break + # if different of ‘65535’, add a count to the corresponding channel + if count_channel != 65535: + if first_frame <= frame_number: + spectrum_image[navigation_index // shape[1], + navigation_index % shape[1], + count_channel // rebin_energy] += 1 + else: + navigation_index += 1 + + +@jit_ifnumba() +def _fill_array_with_stream(spectrum_image, stream, first_frame, + last_frame, rebin_energy=1): + navigation_index = 0 + frame_number = 0 + shape = spectrum_image.shape + for count_channel in np.nditer(stream): + # when we reach the end of the frame, reset the navigation index to 0 + if navigation_index == (shape[1] * shape[2]): + navigation_index = 0 + frame_number += 1 + # break the for loop when we reach the last frame we want to read + if frame_number == last_frame: + break + # if different of ‘65535’, add a count to the corresponding channel + if count_channel != 65535: + if first_frame <= frame_number: + spectrum_image[frame_number - first_frame, + navigation_index // shape[2], + navigation_index % shape[2], + count_channel // rebin_energy] += 1 + else: + navigation_index += 1 + + +def stream_to_array( + stream, spatial_shape, channels, last_frame, first_frame=0, + rebin_energy=1, sum_frames=True, dtype="uint16", spectrum_image=None): + """Returns data stored in a FEI stream as a nd COO array + + Parameters + ---------- + stream: numpy array + spatial_shape: tuple of ints + (ysize, xsize) + channels: ints + Number of channels in the spectrum + rebin_energy: int + Rebin the spectra. The default is 1 (no rebinning applied) + sum_frames: bool + If True, sum all the frames + dtype: numpy dtype + dtype of the array where to store the data + number_of_frame: int or None + spectrum_image: numpy array or None + If not None, the array provided will be filled with the data in the + stream. 
+ + """ + + frames = last_frame - first_frame + if not sum_frames: + if spectrum_image is None: + spectrum_image = np.zeros( + (frames, spatial_shape[0], spatial_shape[1], + int(channels / rebin_energy)), + dtype=dtype) + + _fill_array_with_stream( + spectrum_image=spectrum_image, + stream=stream, + first_frame=first_frame, + last_frame=last_frame, + rebin_energy=rebin_energy) + else: + if spectrum_image is None: + spectrum_image = np.zeros( + (spatial_shape[0], spatial_shape[1], + int(channels / rebin_energy)), + dtype=dtype) + _fill_array_with_stream_sum_frames( + spectrum_image=spectrum_image, + stream=stream, + first_frame=first_frame, + last_frame=last_frame, + rebin_energy=rebin_energy) + return spectrum_image + + +@jit_ifnumba() +def array_to_stream(array): + """Convert an array to a FEI stream + + Parameters + ---------- + array: array + + """ + + channels = array.shape[-1] + flat_array = array.ravel() + stream_data = [] + channel = 0 + for value in flat_array: + for j in range(value): + stream_data.append(channel) + channel += 1 + if channel % channels == 0: + channel = 0 + stream_data.append(65535) + stream_data = stream_data[:-1] # Remove final mark + stream_data = np.array(stream_data) + return stream_data diff --git a/hyperspy/misc/slicing.py b/hyperspy/misc/slicing.py index a2fb8c1e21..64c728f959 100644 --- a/hyperspy/misc/slicing.py +++ b/hyperspy/misc/slicing.py @@ -1,6 +1,25 @@ +# -*- coding: utf-8 -*- +# Copyright 2007-2016 The HyperSpy developers +# +# This file is part of HyperSpy. +# +# HyperSpy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HyperSpy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HyperSpy. If not, see . + from operator import attrgetter import numpy as np from dask.array import Array as dArray + from hyperspy.misc.utils import attrsetter from hyperspy.misc.export_dictionary import parse_flag_string from hyperspy import roi @@ -146,6 +165,27 @@ def make_slice_navigation_decision(flags, isnav): class SpecialSlicers(object): def __init__(self, obj, isNavigation): + """Create a slice of the signal. The indexing supports integer, + decimal numbers or strings (containing a decimal number and an units). + + >>> s = hs.signals.Signal1D(np.arange(10)) + >>> s + + >>> s.data + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> s.axes_manager[0].scale = 0.5 + >>> s.axes_manager[0].axis + array([ 0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. 
, 4.5]) + >>> s.isig[0.5:4.].data + array([1, 2, 3, 4, 5, 6, 7]) + >>> s.isig[0.5:4].data + array([1, 2, 3]) + >>> s.isig[0.5:4:2].data + array([1, 3]) + >>> s.axes_manager[0].units = 'µm' + >>> s.isig[:'2000 nm'].data + array([0, 1, 2, 3]) + """ self.isNavigation = isNavigation self.obj = obj @@ -296,5 +336,3 @@ def _slicer(self, slices, isNavigation=None, out=None): return _obj else: out.events.data_changed.trigger(obj=out) - -# vim: textwidth=80 diff --git a/hyperspy/misc/utils.py b/hyperspy/misc/utils.py index b13d65be08..8ac2a0ab5c 100644 --- a/hyperspy/misc/utils.py +++ b/hyperspy/misc/utils.py @@ -24,7 +24,6 @@ from io import StringIO import codecs import collections -import tempfile import unicodedata from contextlib import contextmanager from hyperspy.misc.signal_tools import broadcast_signals @@ -152,15 +151,14 @@ def slugify(value, valid_variable_name=False): try: # Convert to unicode using the default encoding value = str(value) - except: + except BaseException: # Try latin1. If this does not work an exception is raised. value = str(value, "latin1") value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') value = value.translate(None, _slugify_strip_re_data).decode().strip() value = value.replace(' ', '_') - if valid_variable_name is True: - if value[:1].isdigit(): - value = 'Number_' + value + if valid_variable_name and not value.isidentifier(): + value = 'Number_' + value return value @@ -594,7 +592,7 @@ def ensure_unicode(stuff, encoding='utf8', encoding2='latin-1'): string = stuff try: string = string.decode(encoding) - except: + except BaseException: string = string.decode(encoding2, errors='ignore') return string @@ -947,7 +945,7 @@ def create_map_objects(function, nav_size, iterating_kwargs, **kwargs): from hyperspy.signal import BaseSignal from itertools import repeat - iterators = tuple(signal[1]._iterate_signal() + iterators = tuple(signal[1]._cycle_signal() if isinstance(signal[1], BaseSignal) else signal[1] for signal in iterating_kwargs) # make all kwargs iterating for simplicity: diff --git a/hyperspy/model.py b/hyperspy/model.py index 5202eab6f2..66a39ddb26 100644 --- a/hyperspy/model.py +++ b/hyperspy/model.py @@ -24,6 +24,7 @@ from distutils.version import LooseVersion import numpy as np +import dill import scipy import scipy.odr as odr from scipy.optimize import (leastsq, least_squares, @@ -64,6 +65,15 @@ class DummyComponentsContainer: components.__dict__.update(components2d.__dict__) +def reconstruct_component(comp_dictionary, **init_args): + _id = comp_dictionary['_id_name'] + try: + _class = getattr(components, _id) + except AttributeError: + _class = dill.loads(comp_dictionary['_class_dump']) + return _class(**init_args) + + class ModelComponents(object): """Container for model components. @@ -282,7 +292,7 @@ def _load_dictionary(self, dic): if 'init' in parse_flag_string(flags_str): init_args[k] = reconstruct_object(flags_str, comp[k]) - self.append(getattr(components, comp['_id_name'])(**init_args)) + self.append(reconstruct_component(comp, **init_args)) id_dict.update(self[-1]._load_dictionary(comp)) # deal with twins: for comp in dic['components']: @@ -791,6 +801,20 @@ def fetch_stored_values(self, only_fixed=False): for component in self: component.fetch_stored_values(only_fixed=only_fixed) + def fetch_values_from_array(self, array, array_std=None): + """Fetch the parameter values from the given array, optionally also + fetching the standard deviations. 
+ + Parameters + ---------- + array : array + array with the parameter values + array_std : {None, array} + array with the standard deviations of parameters + """ + self.p0 = array + self._fetch_values_from_p0(p_std=array_std) + def _fetch_values_from_p0(self, p_std=None): """Fetch the parameter values from the output of the optimizer `self.p0` @@ -923,10 +947,13 @@ def fit(self, fitter="leastsq", method='ls', grad=False, If `variance` is a `Signal` instance of the same `navigation_dimension` as the signal, and `method` is "ls", then weighted least squares is performed. - method : {'ls', 'ml'} + method : {'ls', 'ml', 'custom'} Choose 'ls' (default) for least-squares and 'ml' for Poisson maximum likelihood estimation. The latter is not available when - 'fitter' is "leastsq", "odr" or "mpfit". + 'fitter' is "leastsq", "odr" or "mpfit". 'custom' allows passing + your own minimisation function as a kwarg "min_function", with + optional gradient kwarg "min_function_grad". See User Guide for + details. grad : bool If True, the analytical gradient is used if defined to speed up the optimization. @@ -988,6 +1015,20 @@ def fit(self, fitter="leastsq", method='ls', grad=False, # this has to be done before setting the p0, # so moved things around self.ensure_parameters_in_bounds() + min_function = kwargs.pop('min_function', None) + min_function_grad = kwargs.pop('min_function_grad', None) + if method == 'custom': + if not callable(min_function): + raise ValueError('Custom minimization requires "min_function" ' + 'kwarg with a callable') + if grad is not False: + if min_function_grad is None: + raise ValueError('Custom gradient function should be ' + 'supplied with "min_function_grad" kwarg') + from functools import partial + min_function = partial(min_function, self) + if callable(min_function_grad): + min_function_grad = partial(min_function_grad, self) with cm(update_on_resume=True): self.p_std = None @@ -1008,12 +1049,12 @@ def fit(self, fitter="leastsq", method='ls', grad=False, grad_ml = self._gradient_ml grad_ls = self._gradient_ls - if method == 'ml': + if method in ['ml', 'custom']: weights = None if fitter in ("leastsq", "odr", "mpfit"): raise NotImplementedError( - "Maximum likelihood estimation is not supported " - 'for the "leastsq", "mpfit" or "odr" optimizers') + '"leastsq", "mpfit" and "odr" optimizers only support ' + 'least squares ("ls") method') elif method == "ls": metadata = self.signal.metadata if "Signal.Noise_properties.variance" not in metadata: @@ -1039,7 +1080,7 @@ def fit(self, fitter="leastsq", method='ls', grad=False, weights = 1.
/ np.sqrt(variance) else: raise ValueError( - 'method must be "ls" or "ml" but %s given' % + 'method must be "ls", "ml" or "custom" but %s given' % method) args = (self.signal()[np.where(self.channel_switches)], weights) @@ -1098,7 +1139,7 @@ def fit(self, fitter="leastsq", method='ls', grad=False, self.signal()[np.where(self.channel_switches)], sx=None, sy=(1 / weights if weights is not None else None)) - myodr = odr.ODR(mydata, modelo, beta0=self.p0[:]) + myodr = odr.ODR(mydata, modelo, beta0=self.p0[:], **kwargs) myoutput = myodr.run() result = myoutput.beta self.p_std = myoutput.sd_beta @@ -1118,7 +1159,7 @@ def fit(self, fitter="leastsq", method='ls', grad=False, 'y': self.signal()[self.channel_switches], 'weights': weights}, autoderivative=autoderivative, - quiet=1) + quiet=1, **kwargs) self.p0 = m.params if hasattr(self, 'axis') and (self.axis.size > len(self.p0)) \ @@ -1136,6 +1177,9 @@ def fit(self, fitter="leastsq", method='ls', grad=False, elif method == "ls": tominimize = self._errfunc2 fprime = grad_ls + elif method == 'custom': + tominimize = min_function + fprime = min_function_grad # OPTIMIZERS # Derivative-free methods @@ -1809,7 +1853,7 @@ def __getitem__(self, slices): flags_str, value = v if 'init' in parse_flag_string(flags_str): init_args[k] = value - _model.append(getattr(components, comp._id_name)(**init_args)) + _model.append(comp.__class__(**init_args)) copy_slice_from_whitelist(self.model, _model, dims, diff --git a/hyperspy/models/model1d.py b/hyperspy/models/model1d.py index 35d94df7c4..ca24228cf8 100644 --- a/hyperspy/models/model1d.py +++ b/hyperspy/models/model1d.py @@ -54,7 +54,7 @@ def __init__(self, model, component, signal_range=None, self.only_current = only_current if signal_range == "interactive": if (not hasattr(self.model, '_plot') or self.model._plot is None or - not self.model._plot.is_active): + not self.model._plot.is_active): self.model.plot() self.span_selector_switch(on=True) @@ -383,7 +383,7 @@ def __call__(self, non_convolved=False, onlyactive=False, If True, only the active components will be used to build the model. component_list : list or None - If None, the sum of all the components is returned. If list, only + If None, the sum of all the components is returned. If list, only the provided components are returned cursor: 1 or 2 @@ -641,7 +641,7 @@ def _model2plot(self, axes_manager, out_of_range2nans=True): s = ns return s - def plot(self, plot_components=False): + def plot(self, plot_components=False, **kwargs): """Plots the current spectrum to the screen and a map with a cursor to explore the SI. @@ -649,11 +649,14 @@ def plot(self, plot_components=False): ---------- plot_components : bool If True, add a line per component to the signal figure. 
+ kwargs: + All extra keyword arguments are passed to ``Signal1D.plot`` + """ # If new coordinates are assigned - self.signal.plot() + self.signal.plot(**kwargs) _plot = self.signal._plot l1 = _plot.signal_plot.ax_lines[0] color = l1.line.get_color() diff --git a/hyperspy/roi.py b/hyperspy/roi.py index 56bb0eb168..074e72444a 100644 --- a/hyperspy/roi.py +++ b/hyperspy/roi.py @@ -45,6 +45,8 @@ """ +from functools import partial + import traits.api as t import numpy as np @@ -391,7 +393,8 @@ def _on_widget_change(self, widget): self._update_widgets(exclude=(widget,)) self.events.changed.trigger(self) - def add_widget(self, signal, axes=None, widget=None, color='green'): + def add_widget(self, signal, axes=None, widget=None, + color='green', **kwargs): """Add a widget to visually represent the ROI, and connect it so any changes in either are reflected in the other. Note that only one widget can be added per signal/axes combination. @@ -405,8 +408,6 @@ def add_widget(self, signal, axes=None, widget=None, color='green'): axes : specification of axes to use, default = None The axes argument specifies which axes the ROI will be applied on. The DataAxis in the collection can be either of the following: - * "navigation" or "signal", in which the first axes of that - space's axes will be used. * a tuple of: - DataAxis. These will not be checked with signal.axes_manager. @@ -421,10 +422,14 @@ def add_widget(self, signal, axes=None, widget=None, color='green'): The color for the widget. Any format that matplotlib uses should be ok. This will not change the color fo any widget passed with the 'widget' argument. + kwargs: + All keyword arguments are passed to the widget constructor. """ axes = self._parse_axes(axes, signal.axes_manager,) if widget is None: - widget = self._get_widget_type(axes, signal)(signal.axes_manager) + widget = self._get_widget_type( + axes, signal)( + signal.axes_manager, **kwargs) widget.color = color # Remove existing ROI, if it exsists and axes match @@ -486,6 +491,30 @@ def __call__(self, signal, out=None, axes=None): return s +def guess_vertical_or_horizontal(axes, signal): + # Figure out whether to use horizontal or vertical line: + if axes[0].navigate: + plotdim = len(signal._plot.navigator_data_function().shape) + axdim = signal.axes_manager.navigation_dimension + idx = signal.axes_manager.navigation_axes.index(axes[0]) + else: + plotdim = len(signal._plot.signal_data_function().shape) + axdim = signal.axes_manager.signal_dimension + idx = signal.axes_manager.signal_axes.index(axes[0]) + + if plotdim == 2: # Plot is an image + # axdim == 1 and plotdim == 2 indicates "spectrum stack" + if idx == 0 and axdim != 1: # Axis is horizontal + return "vertical" + else: # Axis is vertical + return "horizontal" + elif plotdim == 1: # It is a spectrum + return "vertical" + else: + raise ValueError( + "Could not find valid widget type for the given `axes` value") + + @add_gui_method(toolkey="Point1DROI") class Point1DROI(BasePointROI): @@ -516,26 +545,13 @@ def _apply_roi2widget(self, widget): widget.position = (self.value,) def _get_widget_type(self, axes, signal): - # Figure out whether to use horizontal or veritcal line: - if axes[0].navigate: - plotdim = len(signal._plot.navigator_data_function().shape) - axdim = signal.axes_manager.navigation_dimension - idx = signal.axes_manager.navigation_axes.index(axes[0]) - else: - plotdim = len(signal._plot.signal_data_function().shape) - axdim = signal.axes_manager.signal_dimension - idx = signal.axes_manager.signal_axes.index(axes[0]) -
if plotdim == 2: # Plot is an image - # axdim == 1 and plotdim == 2 indicates "spectrum stack" - if idx == 0 and axdim != 1: # Axis is horizontal - return widgets.VerticalLineWidget - else: # Axis is vertical - return widgets.HorizontalLineWidget - elif plotdim == 1: # It is a spectrum + direction = guess_vertical_or_horizontal(axes=axes, signal=signal) + if direction == "vertical": return widgets.VerticalLineWidget + elif direction == "horizontal": + return widgets.HorizontalLineWidget else: - raise ValueError("Could not find valid widget type") + raise ValueError("direction must be either horizontal or vertical") def __repr__(self): return "%s(value=%g)" % ( @@ -628,7 +644,13 @@ def _apply_roi2widget(self, widget): widget.set_bounds(left=self.left, right=self.right) def _get_widget_type(self, axes, signal): - return widgets.RangeWidget + direction = guess_vertical_or_horizontal(axes=axes, signal=signal) + if direction == "vertical": + return partial(widgets.RangeWidget, direction="horizontal") + elif direction == "horizontal": + return partial(widgets.RangeWidget, direction="vertical") + else: + raise ValueError("direction must be either horizontal or vertical") def __repr__(self): return "%s(left=%g, right=%g)" % ( diff --git a/hyperspy/samfire.py b/hyperspy/samfire.py index c35eb8991f..f9c87eecd2 100644 --- a/hyperspy/samfire.py +++ b/hyperspy/samfire.py @@ -234,6 +234,10 @@ def start(self, **kwargs): self._setup() if self._workers and self.pool is not None: self.pool.update_parameters() + if 'min_function' in kwargs: + kwargs['min_function'] = dill.dumps(kwargs['min_function']) + if 'min_function_grad' in kwargs: + kwargs['min_function_grad'] = dill.dumps(kwargs['min_function_grad']) self._args = kwargs num_of_strat = len(self.strategies) total_size = self.model.axes_manager.navigation_size - self.pixels_done diff --git a/hyperspy/samfire_utils/samfire_pool.py b/hyperspy/samfire_utils/samfire_pool.py index dc08394f66..d114cfa0c4 100644 --- a/hyperspy/samfire_utils/samfire_pool.py +++ b/hyperspy/samfire_utils/samfire_pool.py @@ -151,6 +151,10 @@ def prepare_workers(self, samfire): self.samf = samfire mall = samfire.model model = mall.inav[mall.axes_manager.indices] + if model.signal.metadata.has_item('Signal.Noise_properties.variance'): + var = model.signal.metadata.Signal.Noise_properties.variance + if var._lazy: + var.compute() model.store('z') m_dict = model.signal._to_dictionary(False) m_dict['models'] = model.signal.models._models.as_dictionary() @@ -263,14 +267,14 @@ def add_jobs(self, needed_number=None): needed_number: {None, int} The number of jobs to add. 
If None (default), adds `need_pixels` """ + def test_func(worker, ind, value_dict): + return worker.run_pixel(ind, value_dict) if needed_number is None: needed_number = self.need_pixels for ind, value_dict in self.samf.generate_values(needed_number): if self.is_multiprocessing: self.shared_queue.put(('run_pixel', (ind, value_dict))) elif self.is_ipyparallel: - def test_func(worker, ind, value_dict): - return worker.run_pixel(ind, value_dict) self.results.append((self.pool.apply_async(test_func, self.rworker, ind, value_dict), ind)) diff --git a/hyperspy/samfire_utils/samfire_worker.py b/hyperspy/samfire_utils/samfire_worker.py index 92d769a503..9d46e3a9eb 100644 --- a/hyperspy/samfire_utils/samfire_worker.py +++ b/hyperspy/samfire_utils/samfire_worker.py @@ -165,6 +165,13 @@ def run_pixel(self, ind, value_dict): self.value_dict = value_dict self.fitting_kwargs = self.value_dict.pop('fitting_kwargs', {}) + if 'min_function' in self.fitting_kwargs: + self.fitting_kwargs['min_function'] = dill.loads( + self.fitting_kwargs['min_function']) + if 'min_function_grad' in self.fitting_kwargs and isinstance( + self.fitting_kwargs['min_function_grad'], bytes): + self.fitting_kwargs['min_function_grad'] = dill.loads( + self.fitting_kwargs['min_function_grad']) self.model.signal.data[:] = self.value_dict.pop('signal.data') if self.model.signal.metadata.has_item( diff --git a/hyperspy/signal.py b/hyperspy/signal.py index 254c87b398..6e2d1a0a0b 100644 --- a/hyperspy/signal.py +++ b/hyperspy/signal.py @@ -23,9 +23,11 @@ from contextlib import contextmanager from datetime import datetime import logging +from pint import UnitRegistry, UndefinedUnitError import numpy as np import scipy as sp +import dask.array as da from matplotlib import pyplot as plt import traits.api as t import numbers @@ -1932,8 +1934,8 @@ def __call__(self, axes_manager=None): return np.atleast_1d( self.data.__getitem__(axes_manager._getitem_tuple)) - def plot(self, navigator="auto", axes_manager=None, - plot_markers=True, **kwargs): + def plot(self, navigator="auto", axes_manager=None, plot_markers=True, + **kwargs): """%s %s @@ -2157,7 +2159,7 @@ def get_dimensions_from_data(self): for axis in self.axes_manager._axes: axis.size = int(dc.shape[axis.index_in_array]) - def crop(self, axis, start=None, end=None): + def crop(self, axis, start=None, end=None, convert_units=False): """Crops the data in a given axis. The range is given in pixels Parameters @@ -2171,6 +2173,10 @@ def crop(self, axis, start=None, end=None): the value is taken as the axis index. If float the index is calculated using the axis calibration. If start/end is None crop from/to the low/high end of the axis. + convert_units : bool + Default is False + If True, convert the units using the 'convert_to_units' method of + the 'axes_manager'. If False, does nothing. """ axis = self.axes_manager[axis] @@ -2187,6 +2193,8 @@ def crop(self, axis, start=None, end=None): self.get_dimensions_from_data() self.squeeze() self.events.data_changed.trigger(obj=self) + if convert_units: + self.axes_manager.convert_units(axis) def swap_axes(self, axis1, axis2, optimize=False): """Swaps the axes. 
@@ -2380,7 +2388,8 @@ def rebin(self, new_shape=None, scale=None, crop=True, out=None): s.data = data s.get_dimensions_from_data() for i, factor in enumerate(factors): - s.axes_manager[i].offset += ((factor-1) * s.axes_manager[i].scale)/2 + s.axes_manager[i].offset += ((factor - 1) + * s.axes_manager[i].scale) / 2 for axis, axis_src in zip(s.axes_manager._axes, self.axes_manager._axes): axis.scale = axis_src.scale * factors[axis.index_in_array] @@ -2737,6 +2746,46 @@ def _iterate_signal(self): getitem[unfolded_axis] = i yield(data[tuple(getitem)]) + def _cycle_signal(self): + """Cycles over the signal data. + + It is faster than using the signal iterator. + + Warning! could produce a infinite loop. + + """ + if self.axes_manager.navigation_size < 2: + while True: + yield self() + return + self._make_sure_data_is_contiguous() + axes = [axis.index_in_array for + axis in self.axes_manager.signal_axes] + if axes: + unfolded_axis = ( + self.axes_manager.navigation_axes[0].index_in_array) + new_shape = [1] * len(self.data.shape) + for axis in axes: + new_shape[axis] = self.data.shape[axis] + new_shape[unfolded_axis] = -1 + else: # signal_dimension == 0 + new_shape = (-1, 1) + axes = [1] + unfolded_axis = 0 + # Warning! if the data is not contigous it will make a copy!! + data = self.data.reshape(new_shape) + getitem = [0] * len(data.shape) + for axis in axes: + getitem[axis] = slice(None) + i = 0 + Ni = data.shape[unfolded_axis] + while True: + getitem[unfolded_axis] = i + yield(data[tuple(getitem)]) + i += 1 + if i == Ni: + i = 0 + def _remove_axis(self, axes): am = self.axes_manager axes = am[axes] @@ -3201,6 +3250,153 @@ def integrate_simpson(self, axis, out=None): return s integrate_simpson.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG) + def fft(self, shifted=False, **kwargs): + """Compute the discrete Fourier Transform. + + This function computes the discrete Fourier Transform over the signal + axes by means of the Fast Fourier Transform (FFT) as implemented in + numpy. + + Parameters + ---------- + shifted : bool, optional + If True, the origin of FFT will be shifted in the centre (Default: False). + + **kwargs + other keyword arguments are described in np.fft.fftn(). + + Return + ------ + s : ComplexSignal + + Examples + -------- + >>> im = hs.signals.Signal2D(scipy.misc.ascent()) + >>> im.fft() + + # Use following to plot power spectrum of `im`: + >>> np.log(im.fft(shifted=True).amplitude).plot() + + Notes + ----- + For further information see the documentation of numpy.fft.fftn + """ + + if self.axes_manager.signal_dimension == 0: + raise AttributeError("Signal dimension must be at least one.") + ax = self.axes_manager + axes = ax.signal_indices_in_array + if isinstance(self.data, da.Array): + if shifted: + im_fft = self._deepcopy_with_new_data(da.fft.fftshift( + da.fft.fftn(self.data, axes=axes, **kwargs), axes=axes)) + else: + im_fft = self._deepcopy_with_new_data( + da.fft.fftn(self.data, axes=axes, **kwargs)) + else: + if shifted: + im_fft = self._deepcopy_with_new_data(np.fft.fftshift( + np.fft.fftn(self.data, axes=axes, **kwargs), axes=axes)) + else: + im_fft = self._deepcopy_with_new_data( + np.fft.fftn(self.data, axes=axes, **kwargs)) + + im_fft.change_dtype("complex") + im_fft.metadata.General.title = 'FFT of {}'.format( + im_fft.metadata.General.title) + im_fft.metadata.set_item('Signal.FFT.shifted', shifted) + + ureg = UnitRegistry() + for axis in im_fft.axes_manager.signal_axes: + axis.scale = 1. 
/ axis.size / axis.scale + try: + units = ureg.parse_expression(str(axis.units))**(-1) + axis.units = '{:~}'.format(units.units) + except UndefinedUnitError: + _logger.warning('Units are not set or cannot be recognized') + if shifted: + axis.offset = -axis.high_value / 2. + return im_fft + + def ifft(self, shifted=None, **kwargs): + """ + Compute the inverse discrete Fourier Transform. + + This function computes real part of the inverse of the discrete + Fourier Transform over the signal axes by means of the + Fast Fourier Transform (FFT) as implemented in + numpy. + + Parameters + ---------- + shifted : bool or None, optional + If None the shift option will be set to the original status of the FFT using value in metadata. + If no FFT entry is present in metadata the parameter will be set to False. + If True, the origin of FFT will be shifted in the centre, + otherwise the origin would be kept at (0, 0)(Default: None). + **kwargs + other keyword arguments are described in np.fft.ifftn(). + + Return + ------ + s : Signal + + Examples + -------- + >>> import scipy + >>> im = hs.signals.Signal2D(scipy.misc.ascent()) + >>> imfft = im.fft() + >>> imfft.ifft() + + + Notes + ----- + For further information see the documentation of numpy.fft.ifftn + + """ + + if self.axes_manager.signal_dimension == 0: + raise AttributeError("Signal dimension must be at least one.") + ax = self.axes_manager + axes = ax.signal_indices_in_array + if shifted is None: + try: + shifted = self.metadata.Signal.FFT.shifted + except AttributeError: + shifted = False + + if isinstance(self.data, da.Array): + if shifted: + fft_data_shifted = da.fft.ifftshift(self.data, axes=axes) + im_ifft = self._deepcopy_with_new_data( + da.fft.ifftn(fft_data_shifted, axes=axes, **kwargs)) + else: + im_ifft = self._deepcopy_with_new_data(da.fft.ifftn( + self.data, axes=axes, **kwargs)) + else: + if shifted: + im_ifft = self._deepcopy_with_new_data(np.fft.ifftn(np.fft.ifftshift( + self.data, axes=axes), axes=axes, **kwargs)) + else: + im_ifft = self._deepcopy_with_new_data(np.fft.ifftn( + self.data, axes=axes, **kwargs)) + + im_ifft.metadata.General.title = 'iFFT of {}'.format( + im_ifft.metadata.General.title) + im_ifft.metadata.Signal.__delattr__('FFT') + im_ifft = im_ifft.real + + ureg = UnitRegistry() + for axis in im_ifft.axes_manager.signal_axes: + axis.scale = 1. / axis.size / axis.scale + try: + units = ureg.parse_expression(str(axis.units)) ** (-1) + axis.units = '{:~}'.format(units.units) + except UndefinedUnitError: + _logger.warning('Units are not set or cannot be recognized') + axis.offset = 0. + return im_ifft + def integrate1D(self, axis, out=None): """Integrate the signal over the given axis. 
@@ -3950,12 +4146,35 @@ def get_current_signal(self, auto_title=True, auto_filename=True): """ + + metadata = self.metadata.deepcopy() + + # Check if marker update + if metadata.has_item('Markers'): + marker_name_list = metadata.Markers.keys() + markers_dict = metadata.Markers.__dict__ + for marker_name in marker_name_list: + marker = markers_dict[marker_name]['_dtb_value_'] + if marker.auto_update: + marker.axes_manager = self.axes_manager + key_dict = {} + for key in marker.data.dtype.names: + key_dict[key] = marker.get_data_position(key) + marker.set_data(**key_dict) + cs = self.__class__( self(), axes=self.axes_manager._get_signal_axes_dicts(), - metadata=self.metadata.as_dictionary(), + metadata=metadata.as_dictionary(), attributes={'_lazy': False}) + if cs.metadata.has_item('Markers'): + temp_marker_dict = cs.metadata.Markers.as_dictionary() + markers_dict = markers_metadata_dict_to_markers( + temp_marker_dict, + cs.axes_manager) + cs.metadata.Markers = markers_dict + if auto_filename is True and self.tmp_parameters.has_item('filename'): cs.tmp_parameters.filename = (self.tmp_parameters.filename + '_' + diff --git a/hyperspy/signal_tools.py b/hyperspy/signal_tools.py index c4ead9a72a..10c38b14d2 100644 --- a/hyperspy/signal_tools.py +++ b/hyperspy/signal_tools.py @@ -75,9 +75,14 @@ def span_selector_switch(self, on): def update_span_selector_traits(self, *args, **kwargs): if not self.signal._plot.is_active: return - self.ss_left_value = self.span_selector.rect.get_x() - self.ss_right_value = self.ss_left_value + \ - self.span_selector.rect.get_width() + x0 = self.span_selector.rect.get_x() + if x0 < self.axis.low_value: + x0 = self.axis.low_value + self.ss_left_value = x0 + x1 = self.ss_left_value + self.span_selector.rect.get_width() + if x1 > self.axis.high_value: + x1 = self.axis.high_value + self.ss_right_value = x1 def reset_span_selector(self): self.span_selector_switch(False) @@ -647,8 +652,16 @@ class BackgroundRemoval(SpanSelectorInSignal1D): polynomial_order = t.Range(1, 10) fast = t.Bool(True, desc=("Perform a fast (analytic, but possibly less accurate)" - " estimation of the background. Otherwise use " - "non-linear least squares.")) + " estimation \nof the background. " + "Otherwise use non-linear least " + "squares.")) + zero_fill = t.Bool( + False, + desc=("Set all spectral channels lower than the lower \n" + "bound of the fitting range to zero (this is the \n" + "default behavior of Gatan's DigitalMicrograph). 
\n" + "Otherwise leave the pre-fitting region as-is \n" + "(useful for inspecting quality of background fit).")) background_estimator = t.Instance(Component) bg_line_range = t.Enum('from_left_range', 'full', @@ -657,7 +670,7 @@ class BackgroundRemoval(SpanSelectorInSignal1D): hi = t.Int(0) def __init__(self, signal, background_type='Power Law', polynomial_order=2, - fast=True, show_progressbar=None): + fast=True, plot_remainder=True, zero_fill=False, show_progressbar=None): super(BackgroundRemoval, self).__init__(signal) # setting the polynomial order will change the backgroud_type to # polynomial, so we set it before setting the background type @@ -665,13 +678,19 @@ def __init__(self, signal, background_type='Power Law', polynomial_order=2, self.background_type = background_type self.set_background_estimator() self.fast = fast + self.plot_remainder = plot_remainder + self.zero_fill = zero_fill self.show_progressbar = show_progressbar self.bg_line = None + self.rm_line = None def on_disabling_span_selector(self): if self.bg_line is not None: self.bg_line.close() self.bg_line = None + if self.rm_line is not None: + self.rm_line.close() + self.rm_line = None def set_background_estimator(self): if self.background_type == 'Power Law': @@ -715,6 +734,17 @@ def create_background_line(self): self.bg_line.autoscale = False self.bg_line.plot() + def create_remainder_line(self): + self.rm_line = drawing.signal1d.Signal1DLine() + self.rm_line.data_function = self.rm_to_plot + self.rm_line.set_line_properties( + color='green', + type='line', + scaley=False) + self.signal._plot.signal_plot.add_line(self.rm_line) + self.rm_line.autoscale = False + self.rm_line.plot() + def bg_to_plot(self, axes_manager=None, fill_with=np.nan): # First try to update the estimation self.background_estimator.estimate_parameters( @@ -743,6 +773,9 @@ def bg_to_plot(self, axes_manager=None, fill_with=np.nan): to_return *= self.axis.scale return to_return + def rm_to_plot(self, axes_manager=None, fill_with=np.nan): + return self.signal() - self.bg_line.line.get_ydata() + def span_selector_changed(self): if self.ss_left_value is np.nan or self.ss_right_value is np.nan or\ self.ss_right_value <= self.ss_left_value: @@ -757,6 +790,15 @@ def span_selector_changed(self): self.create_background_line() else: self.bg_line.update() + if self.plot_remainder: + if self.rm_line is None and \ + self.background_estimator.estimate_parameters( + self.signal, self.ss_left_value, + self.ss_right_value, + only_current=True) is True: + self.create_remainder_line() + else: + self.rm_line.update() def apply(self): if self.signal._plot: @@ -770,6 +812,7 @@ def apply(self): signal_range=(self.ss_left_value, self.ss_right_value), background_type=background_type, fast=self.fast, + zero_fill=self.zero_fill, polynomial_order=self.polynomial_order, show_progressbar=self.show_progressbar) self.signal.data = new_spectra.data @@ -920,14 +963,15 @@ def find(self, back=False): self._reset_line() ncoordinates = len(self.coordinates) spike = self.detect_spike() - while not spike and ( - (self.index < ncoordinates - 1 and back is False) or - (self.index > 0 and back is True)): - if back is False: - self.index += 1 - else: - self.index -= 1 - spike = self.detect_spike() + with self.signal.axes_manager.events.indices_changed.suppress(): + while not spike and ( + (self.index < ncoordinates - 1 and back is False) or + (self.index > 0 and back is True)): + if back is False: + self.index += 1 + else: + self.index -= 1 + spike = self.detect_spike() if spike is False: m 
= SimpleMessage() diff --git a/hyperspy/tests/axes/test_axes_manager.py b/hyperspy/tests/axes/test_axes_manager.py index 92fb347724..9629ba3560 100644 --- a/hyperspy/tests/axes/test_axes_manager.py +++ b/hyperspy/tests/axes/test_axes_manager.py @@ -21,7 +21,8 @@ from hyperspy.axes import AxesManager from hyperspy.signals import BaseSignal, Signal1D, Signal2D -from numpy import arange +from hyperspy.defaults_parser import preferences +from numpy import arange, zeros class TestAxesManager: @@ -263,3 +264,46 @@ def test_setting_indices_coordinates(): s.axes_manager.indices = (2, 2) assert s.axes_manager.indices == (2, 2) assert m.call_count == 6 + + +class TestAxesHotkeys: + + def setup_method(self, method): + s = Signal1D(zeros(7 * (5,))) + self.am = s.axes_manager + + def test_hotkeys_in_six_dimensions(self): + 'Step twice increasing and once decreasing all axes' + + mod01 = preferences.Plot.modifier_dims_01 + mod23 = preferences.Plot.modifier_dims_23 + mod45 = preferences.Plot.modifier_dims_45 + + dim0_decrease = mod01 + '+' + preferences.Plot.dims_024_decrease + dim0_increase = mod01 + '+' + preferences.Plot.dims_024_increase + dim1_decrease = mod01 + '+' + preferences.Plot.dims_135_decrease + dim1_increase = mod01 + '+' + preferences.Plot.dims_135_increase + dim2_decrease = mod23 + '+' + preferences.Plot.dims_024_decrease + dim2_increase = mod23 + '+' + preferences.Plot.dims_024_increase + dim3_decrease = mod23 + '+' + preferences.Plot.dims_135_decrease + dim3_increase = mod23 + '+' + preferences.Plot.dims_135_increase + dim4_decrease = mod45 + '+' + preferences.Plot.dims_024_decrease + dim4_increase = mod45 + '+' + preferences.Plot.dims_024_increase + dim5_decrease = mod45 + '+' + preferences.Plot.dims_135_decrease + dim5_increase = mod45 + '+' + preferences.Plot.dims_135_increase + + steps = [dim0_increase, dim0_increase, dim0_decrease, dim1_increase, + dim1_increase, dim1_decrease, dim2_increase, dim2_increase, dim2_decrease, + dim3_increase, dim3_increase, dim3_decrease, dim4_increase, + dim4_increase, dim4_decrease, dim5_increase, dim5_increase, dim5_decrease] + + class fake_key_event(): + 'Fake event handler for plot key press' + + def __init__(self, key): + self.key = key + + for step in steps: + self.am.key_navigator(fake_key_event(step)) + + assert self.am.indices == (1, 1, 1, 1, 1, 1) diff --git a/hyperspy/tests/axes/test_conversion_units.py b/hyperspy/tests/axes/test_conversion_units.py new file mode 100644 index 0000000000..91fe09c45f --- /dev/null +++ b/hyperspy/tests/axes/test_conversion_units.py @@ -0,0 +1,449 @@ +# -*- coding: utf-8 -*- +# Copyright 2007-2016 The HyperSpy developers +# +# This file is part of HyperSpy. +# +# HyperSpy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HyperSpy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HyperSpy. If not, see . 
+ +import numpy.testing as nt +import traits.api as t +import pytest + +from hyperspy.axes import DataAxis, AxesManager, UnitConversion, _ureg +from hyperspy.misc.test_utils import assert_warns, assert_deep_almost_equal + + +class TestUnitConversion: + + def setup_method(self, method): + self.uc = UnitConversion() + self._set_units_scale_size(units='m', scale=1E-3) + + def _set_units_scale_size(self, units=t.Undefined, scale=1.0, size=100, + offset=0.0): + self.uc.units = units + self.uc.scale = scale + self.uc.offset = offset + self.uc.size = size + + def test_units_setter(self): + self.uc.units = ' m' + assert self.uc.units == ' m' + self.uc.units = 'um' + assert self.uc.units == 'um' + self.uc.units = 'µm' + assert self.uc.units == 'µm' + self.uc.units = 'km' + assert self.uc.units == 'km' + + def test_ignore_conversion(self): + assert self.uc._ignore_conversion(t.Undefined) + with assert_warns( + message="not supported for conversion.", + category=UserWarning): + assert self.uc._ignore_conversion('unit_not_supported') + assert not self.uc._ignore_conversion('m') + + def test_converted_compact_scale_units(self): + self.uc.units = 'micron' + with assert_warns( + message="not supported for conversion.", + category=UserWarning): + self.uc._convert_compact_units() + assert self.uc.units == 'micron' + nt.assert_almost_equal(self.uc.scale, 1.0E-3) + + def test_convert_to_units(self): + self._set_units_scale_size(t.Undefined, 1.0) + out = self.uc._convert_units('nm') + assert out is None + assert self.uc.units == t.Undefined + nt.assert_almost_equal(self.uc.scale, 1.0) + + self._set_units_scale_size('m', 1.0E-3) + out = self.uc._convert_units('µm') + assert out is None + assert self.uc.units == 'um' + nt.assert_almost_equal(self.uc.scale, 1E3) + + self._set_units_scale_size('µm', 0.5) + out = self.uc._convert_units('nm') + assert out is None + assert self.uc.units == 'nm' + nt.assert_almost_equal(self.uc.scale, 500) + + self._set_units_scale_size('µm', 5) + out = self.uc._convert_units('cm') + assert out is None + assert self.uc.units == 'cm' + nt.assert_almost_equal(self.uc.scale, 0.0005) + + self._set_units_scale_size('1/µm', 5) + out = self.uc._convert_units('1/nm') + assert out is None + assert self.uc.units == '1 / nm' + nt.assert_almost_equal(self.uc.scale, 0.005) + + self._set_units_scale_size('eV', 5) + out = self.uc._convert_units('keV') + assert out is None + assert self.uc.units == 'keV' + nt.assert_almost_equal(self.uc.scale, 0.005) + + def test_convert_to_units_not_in_place(self): + self._set_units_scale_size(t.Undefined, 1.0) + out = self.uc.convert_to_units('nm', inplace=False) + assert out is None # unit conversion is ignored + assert self.uc.units == t.Undefined + nt.assert_almost_equal(self.uc.scale, 1.0) + + self._set_units_scale_size('m', 1.0E-3) + out = self.uc.convert_to_units('µm', inplace=False) + assert out == (1E3, 0.0, 'um') + assert self.uc.units == 'm' + nt.assert_almost_equal(self.uc.scale, 1.0E-3) + nt.assert_almost_equal(self.uc.offset, 0.0) + + self._set_units_scale_size('µm', 0.5) + out = self.uc.convert_to_units('nm', inplace=False) + assert out[1:] == (0.0, 'nm') + nt.assert_almost_equal(out[0], 500.0) + assert self.uc.units == 'µm' + nt.assert_almost_equal(self.uc.scale, 0.5) + + def test_get_compact_unit(self): + ##### Imaging ##### + # typical setting for high resolution image + self._set_units_scale_size('m', 12E-12, 2048, 2E-9) + self.uc._convert_compact_units() + assert self.uc.units == 'nm' + nt.assert_almost_equal(self.uc.scale, 0.012) + 
nt.assert_almost_equal(self.uc.offset, 2.0) + + # typical setting for nm resolution image + self._set_units_scale_size('m', 0.5E-9, 1024) + self.uc._convert_compact_units() + assert self.uc.units == 'nm' + nt.assert_almost_equal(self.uc.scale, 0.5) + nt.assert_almost_equal(self.uc.offset, 0.0) + + ##### Diffraction ##### + # typical TEM diffraction + self._set_units_scale_size('1/m', 0.1E9, 1024) + self.uc._convert_compact_units() + assert self.uc.units == '1 / nm' + nt.assert_almost_equal(self.uc.scale, 0.1) + + # typical TEM diffraction + self._set_units_scale_size('1/m', 0.01E9, 256) + self.uc._convert_compact_units() + assert self.uc.units == '1 / um' + nt.assert_almost_equal(self.uc.scale, 10.0) + + # high camera length diffraction + self._set_units_scale_size('1/m', 0.1E9, 4096) + self.uc._convert_compact_units() + assert self.uc.units == '1 / nm' + nt.assert_almost_equal(self.uc.scale, 0.1) + + # typical EDS resolution + self._set_units_scale_size('eV', 50, 4096, 0.0) + self.uc._convert_compact_units() + assert self.uc.units == 'keV' + nt.assert_almost_equal(self.uc.scale, 0.05) + nt.assert_almost_equal(self.uc.offset, 0.0) + + ##### Spectroscopy ##### + # typical EELS resolution + self._set_units_scale_size('eV', 0.2, 2048, 200.0) + self.uc._convert_compact_units() + assert self.uc.units == 'eV' + nt.assert_almost_equal(self.uc.scale, 0.2) + nt.assert_almost_equal(self.uc.offset, 200.0) + + # typical EELS resolution + self._set_units_scale_size('eV', 1.0, 2048, 500.0) + self.uc._convert_compact_units() + assert self.uc.units == 'eV' + nt.assert_almost_equal(self.uc.scale, 1.0) + nt.assert_almost_equal(self.uc.offset, 500) + + # typical high resolution EELS resolution + self._set_units_scale_size('eV', 0.05, 100) + self.uc._convert_compact_units() + assert self.uc.units == 'eV' + assert self.uc.scale == 0.05 + + +class TestDataAxis: + + def setup_method(self, method): + self.axis = DataAxis(size=2048, scale=12E-12, units='m', offset=5E-9) + + def test_scale_offset_as_quantity_property(self): + assert self.axis.scale_as_quantity == 12E-12 * _ureg('m') + assert self.axis.offset_as_quantity == 5E-9 * _ureg('m') + + def test_scale_as_quantity_setter_string(self): + self.axis.scale_as_quantity = '2.5 nm' + assert self.axis.scale == 2.5 + nt.assert_almost_equal(self.axis.offset, 5.0) + assert self.axis.units == 'nm' + + def test_scale_as_quantity_setter_string_no_previous_units(self): + axis = DataAxis(size=2048, scale=12E-12, offset=5.0) + axis.scale_as_quantity = '2.5 nm' + assert axis.scale == 2.5 + # the units haven't been set previously, so the offset is not converted + nt.assert_almost_equal(axis.offset, 5.0) + assert axis.units == 'nm' + + def test_offset_as_quantity_setter_string(self): + self.axis.offset_as_quantity = '5e-3 mm' + assert self.axis.scale == 12e-9 + assert self.axis.offset == 5e-3 + assert self.axis.units == 'mm' + + def test_offset_as_quantity_setter_string_no_units(self): + self.axis.offset_as_quantity = '5e-3' + assert self.axis.offset == 5e-3 + assert self.axis.scale == 12E-12 + assert self.axis.units == 'm' + + def test_scale_offset_as_quantity_setter_float(self): + self.axis.scale_as_quantity = 2.5e-9 + assert self.axis.scale == 2.5e-9 + assert self.axis.units == 'm' + + def test_scale_offset_as_quantity_setter_pint_quantity(self): + self.axis.scale_as_quantity = _ureg.parse_expression('2.5 nm') + assert self.axis.scale == 2.5 + assert self.axis.units == 'nm' + + self.axis.offset_as_quantity = _ureg.parse_expression('5e-3 mm') + assert self.axis.offset == 5e-3 
+ assert self.axis.units == 'mm' + + def test_convert_to_compact_units(self): + self.axis.convert_to_units(units=None) + nt.assert_almost_equal(self.axis.scale, 0.012) + assert self.axis.units == 'nm' + nt.assert_almost_equal(self.axis.offset, 5.0) + + def test_convert_to_units(self): + self.axis.convert_to_units(units='µm') + nt.assert_almost_equal(self.axis.scale, 12E-6) + assert self.axis.units == 'um' + nt.assert_almost_equal(self.axis.offset, 0.005) + + def test_units_not_supported_by_pint_warning_raised(self): + # raising a warning, not converting scale + self.axis.units = 'micron' + with assert_warns( + message="not supported for conversion.", + category=UserWarning): + self.axis.convert_to_units('m') + nt.assert_almost_equal(self.axis.scale, 12E-12) + assert self.axis.units == 'micron' + + def test_units_not_supported_by_pint_warning_raised2(self): + # raising a warning, not converting scale + self.axis.units = 'µm' + with assert_warns( + message="not supported for conversion.", + category=UserWarning): + self.axis.convert_to_units('toto') + nt.assert_almost_equal(self.axis.scale, 12E-12) + assert self.axis.units == 'µm' + + +class TestAxesManager: + + def setup_method(self, method): + self.axes_list = [ + {'name': 'x', + 'navigate': True, + 'offset': 0.0, + 'scale': 1.5E-9, + 'size': 1024, + 'units': 'm'}, + {'name': 'y', + 'navigate': True, + 'offset': 0.0, + 'scale': 0.5E-9, + 'size': 1024, + 'units': 'm'}, + {'name': 'energy', + 'navigate': False, + 'offset': 0.0, + 'scale': 5.0, + 'size': 4096, + 'units': 'eV'}] + + self.am = AxesManager(self.axes_list) + + self.axes_list2 = [ + {'name': 'x', + 'navigate': True, + 'offset': 0.0, + 'scale': 1.5E-9, + 'size': 1024, + 'units': 'm'}, + {'name': 'energy', + 'navigate': False, + 'offset': 0.0, + 'scale': 2.5, + 'size': 4096, + 'units': 'eV'}, + {'name': 'energy2', + 'navigate': False, + 'offset': 0.0, + 'scale': 5.0, + 'size': 4096, + 'units': 'eV'}] + self.am2 = AxesManager(self.axes_list2) + + def test_compact_unit(self): + self.am.convert_units() + assert self.am['x'].units == 'nm' + nt.assert_almost_equal(self.am['x'].scale, 1.5) + assert self.am['y'].units == 'nm' + nt.assert_almost_equal(self.am['y'].scale, 0.5) + assert self.am['energy'].units == 'keV' + nt.assert_almost_equal(self.am['energy'].scale, 0.005) + + def test_convert_to_navigation_units(self): + self.am.convert_units(axes='navigation', units='mm') + nt.assert_almost_equal(self.am['x'].scale, 1.5E-6) + assert self.am['x'].units == 'mm' + nt.assert_almost_equal(self.am['y'].scale, 0.5E-6) + assert self.am['y'].units == 'mm' + nt.assert_almost_equal(self.am['energy'].scale, + self.axes_list[-1]['scale']) + + def test_convert_units_axes_integer(self): + # convert only the first axis + self.am.convert_units(axes=0, units='nm', same_units=False) + nt.assert_almost_equal(self.am[0].scale, 0.5) + assert self.am[0].units == 'nm' + nt.assert_almost_equal(self.am['x'].scale, 1.5E-9) + assert self.am['x'].units == 'm' + nt.assert_almost_equal(self.am['energy'].scale, + self.axes_list[-1]['scale']) + + self.am.convert_units(axes=0, units='nm', same_units=True) + nt.assert_almost_equal(self.am[0].scale, 0.5) + assert self.am[0].units == 'nm' + nt.assert_almost_equal(self.am['x'].scale, 1.5) + assert self.am['x'].units == 'nm' + + def test_convert_to_navigation_units_list(self): + self.am.convert_units(axes='navigation', units=['mm', 'nm'], + same_units=False) + nt.assert_almost_equal(self.am['x'].scale, 1.5) + assert self.am['x'].units == 'nm' + 
nt.assert_almost_equal(self.am['y'].scale, 0.5E-6) + assert self.am['y'].units == 'mm' + nt.assert_almost_equal(self.am['energy'].scale, + self.axes_list[-1]['scale']) + + def test_convert_to_navigation_units_list_same_units(self): + self.am.convert_units(axes='navigation', units=['mm', 'nm'], + same_units=True) + assert self.am['x'].units == 'mm' + nt.assert_almost_equal(self.am['x'].scale, 1.5e-6) + assert self.am['y'].units == 'mm' + nt.assert_almost_equal(self.am['y'].scale, 0.5e-6) + assert self.am['energy'].units == 'eV' + nt.assert_almost_equal(self.am['energy'].scale, 5) + + def test_convert_to_navigation_units_different(self): + # Don't convert the units since the units of the navigation axes are + # different + self.axes_list.insert(0, + {'name': 'time', + 'navigate': True, + 'offset': 0.0, + 'scale': 1.5, + 'size': 20, + 'units': 's'}) + am = AxesManager(self.axes_list) + am.convert_units(axes='navigation', same_units=True) + assert am['time'].units == 's' + nt.assert_almost_equal(am['time'].scale, 1.5) + assert am['x'].units == 'nm' + nt.assert_almost_equal(am['x'].scale, 1.5) + assert am['y'].units == 'nm' + nt.assert_almost_equal(am['y'].scale, 0.5) + assert am['energy'].units == 'eV' + nt.assert_almost_equal(am['energy'].scale, 5) + + def test_convert_to_navigation_units_Undefined(self): + self.axes_list[0]['units'] = t.Undefined + am = AxesManager(self.axes_list) + am.convert_units(axes='navigation', same_units=True) + assert am['x'].units == t.Undefined + nt.assert_almost_equal(am['x'].scale, 1.5E-9) + assert am['y'].units == 'm' + nt.assert_almost_equal(am['y'].scale, 0.5E-9) + assert am['energy'].units == 'eV' + nt.assert_almost_equal(am['energy'].scale, 5) + + def test_convert_to_signal_units(self): + self.am.convert_units(axes='signal', units='keV') + nt.assert_almost_equal(self.am['x'].scale, self.axes_list[0]['scale']) + assert self.am['x'].units == self.axes_list[0]['units'] + nt.assert_almost_equal(self.am['y'].scale, self.axes_list[1]['scale']) + assert self.am['y'].units == self.axes_list[1]['units'] + nt.assert_almost_equal(self.am['energy'].scale, 0.005) + assert self.am['energy'].units == 'keV' + + def test_convert_to_units_list(self): + self.am.convert_units(units=['µm', 'nm', 'meV'], same_units=False) + nt.assert_almost_equal(self.am['x'].scale, 1.5) + assert self.am['x'].units == 'nm' + nt.assert_almost_equal(self.am['y'].scale, 0.5E-3) + assert self.am['y'].units == 'um' + nt.assert_almost_equal(self.am['energy'].scale, 5E3) + assert self.am['energy'].units == 'meV' + + def test_convert_to_units_list_same_units(self): + self.am2.convert_units(units=['µm', 'eV', 'meV'], same_units=True) + nt.assert_almost_equal(self.am2['x'].scale, 0.0015) + assert self.am2['x'].units == 'um' + nt.assert_almost_equal(self.am2['energy'].scale, + self.axes_list2[1]['scale']) + assert self.am2['energy'].units == self.axes_list2[1]['units'] + nt.assert_almost_equal(self.am2['energy2'].scale, + self.axes_list2[2]['scale']) + assert self.am2['energy2'].units == self.axes_list2[2]['units'] + + def test_convert_to_units_list_signal2D(self): + self.am2.convert_units(units=['µm', 'eV', 'meV'], same_units=False) + nt.assert_almost_equal(self.am2['x'].scale, 0.0015) + assert self.am2['x'].units == 'um' + nt.assert_almost_equal(self.am2['energy'].scale, 2500) + assert self.am2['energy'].units == 'meV' + nt.assert_almost_equal(self.am2['energy2'].scale, 5.0) + assert self.am2['energy2'].units == 'eV' + + @pytest.mark.parametrize("same_units", (True, False)) + def 
test_convert_to_units_unsupported_units(self, same_units): + with assert_warns( + message="not supported for conversion.", + category=UserWarning): + self.am.convert_units('navigation', units='toto', + same_units=same_units) + assert_deep_almost_equal(self.am._get_axes_dicts(), + self.axes_list) diff --git a/hyperspy/tests/datasets/test_artificial_data.py b/hyperspy/tests/datasets/test_artificial_data.py new file mode 100644 index 0000000000..4825b37d60 --- /dev/null +++ b/hyperspy/tests/datasets/test_artificial_data.py @@ -0,0 +1,44 @@ +import numpy as np +import hyperspy.datasets.artificial_data as ad + + +def test_get_low_loss_eels_signal(): + s = ad.get_low_loss_eels_signal() + assert s.metadata.Signal.signal_type == 'EELS' + + +def test_get_core_loss_eels_signal(): + s = ad.get_core_loss_eels_signal(add_powerlaw=False) + assert s.metadata.Signal.signal_type == 'EELS' + s1 = ad.get_core_loss_eels_signal(add_powerlaw=True) + assert s1.metadata.Signal.signal_type == 'EELS' + assert s1.data.sum() > s.data.sum() + + np.random.seed(seed=10) + s2 = ad.get_core_loss_eels_signal() + np.random.seed(seed=10) + s3 = ad.get_core_loss_eels_signal() + assert (s2.data == s3.data).all() + + +def test_get_core_loss_eels_model(): + m = ad.get_core_loss_eels_model(add_powerlaw=False) + assert m.signal.metadata.Signal.signal_type == 'EELS' + m1 = ad.get_core_loss_eels_model(add_powerlaw=True) + assert m1.signal.metadata.Signal.signal_type == 'EELS' + assert m1.signal.data.sum() > m.signal.data.sum() + + +def test_get_low_loss_eels_line_scan_signal(): + s = ad.get_low_loss_eels_line_scan_signal() + assert s.metadata.Signal.signal_type == 'EELS' + + +def test_get_core_loss_eels_line_scan_signal(): + s = ad.get_core_loss_eels_line_scan_signal() + assert s.metadata.Signal.signal_type == 'EELS' + + +def test_get_atomic_resolution_tem_signal2d(): + s = ad.get_atomic_resolution_tem_signal2d() + assert s.axes_manager.signal_dimension == 2 diff --git a/hyperspy/tests/drawing/plot_markers/test_plot_eds_lines.png b/hyperspy/tests/drawing/plot_markers/test_plot_eds_lines.png index 6f3ad21f27..64d6ffdd2c 100644 Binary files a/hyperspy/tests/drawing/plot_markers/test_plot_eds_lines.png and b/hyperspy/tests/drawing/plot_markers/test_plot_eds_lines.png differ diff --git a/hyperspy/tests/drawing/plot_markers/test_plot_line_markers.png b/hyperspy/tests/drawing/plot_markers/test_plot_line_markers.png index 306ec46e8e..4856cc9c44 100644 Binary files a/hyperspy/tests/drawing/plot_markers/test_plot_line_markers.png and b/hyperspy/tests/drawing/plot_markers/test_plot_line_markers.png differ diff --git a/hyperspy/tests/drawing/plot_markers/test_plot_point_markers.png b/hyperspy/tests/drawing/plot_markers/test_plot_point_markers.png index 88273c369b..9479a0f6eb 100644 Binary files a/hyperspy/tests/drawing/plot_markers/test_plot_point_markers.png and b/hyperspy/tests/drawing/plot_markers/test_plot_point_markers.png differ diff --git a/hyperspy/tests/drawing/plot_markers/test_plot_rectange_markers.png b/hyperspy/tests/drawing/plot_markers/test_plot_rectange_markers.png index a191b15ff9..15017c8f98 100644 Binary files a/hyperspy/tests/drawing/plot_markers/test_plot_rectange_markers.png and b/hyperspy/tests/drawing/plot_markers/test_plot_rectange_markers.png differ diff --git a/hyperspy/tests/drawing/plot_markers/test_plot_text_markers_nav.png b/hyperspy/tests/drawing/plot_markers/test_plot_text_markers_nav.png index f1de4609cc..0d3ca6e2a8 100644 Binary files a/hyperspy/tests/drawing/plot_markers/test_plot_text_markers_nav.png and 
b/hyperspy/tests/drawing/plot_markers/test_plot_text_markers_nav.png differ diff --git a/hyperspy/tests/drawing/plot_markers/test_plot_text_markers_sig.png b/hyperspy/tests/drawing/plot_markers/test_plot_text_markers_sig.png index 85ada53681..9517df40ee 100644 Binary files a/hyperspy/tests/drawing/plot_markers/test_plot_text_markers_sig.png and b/hyperspy/tests/drawing/plot_markers/test_plot_text_markers_sig.png differ diff --git a/hyperspy/tests/drawing/plot_model/test_fit_EELS_convolved_False.png b/hyperspy/tests/drawing/plot_model/test_fit_EELS_convolved_False.png index 226d5c0045..ab4ce1572e 100644 Binary files a/hyperspy/tests/drawing/plot_model/test_fit_EELS_convolved_False.png and b/hyperspy/tests/drawing/plot_model/test_fit_EELS_convolved_False.png differ diff --git a/hyperspy/tests/drawing/plot_model/test_fit_EELS_convolved_True.png b/hyperspy/tests/drawing/plot_model/test_fit_EELS_convolved_True.png index 376b78b365..0ea45d14c7 100644 Binary files a/hyperspy/tests/drawing/plot_model/test_fit_EELS_convolved_True.png and b/hyperspy/tests/drawing/plot_model/test_fit_EELS_convolved_True.png differ diff --git a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-False-False.png b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-False-False.png index f9f8857a96..eb69bd8c4a 100644 Binary files a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-False-False.png and b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-False-False.png differ diff --git a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-False-True.png b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-False-True.png index a0ea0abcbf..0ae985f14f 100644 Binary files a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-False-True.png and b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-False-True.png differ diff --git a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-True-False.png b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-True-False.png index 6db189e939..1792a11d1f 100644 Binary files a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-True-False.png and b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-True-False.png differ diff --git a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-True-True.png b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-True-True.png index 377709f89e..b8226dc036 100644 Binary files a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-True-True.png and b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_False-True-True.png differ diff --git a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-False-False.png b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-False-False.png index 7b0b18dd5d..7623a771a7 100644 Binary files a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-False-False.png and b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-False-False.png differ diff --git a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-False-True.png b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-False-True.png index 7356a3be2d..d3d0287fdd 100644 Binary files a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-False-True.png and 
b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-False-True.png differ diff --git a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-True-False.png b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-True-False.png index 3fc7cf4393..14a423d40b 100644 Binary files a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-True-False.png and b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-True-False.png differ diff --git a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-True-True.png b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-True-True.png index 5e11bbe18c..80cea06f99 100644 Binary files a/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-True-True.png and b/hyperspy/tests/drawing/plot_model/test_plot_gaussian_eelsmodel_True-True-True.png differ diff --git a/hyperspy/tests/drawing/plot_model1d/test_default_navigator_plot.png b/hyperspy/tests/drawing/plot_model1d/test_default_navigator_plot.png new file mode 100644 index 0000000000..c1869e05e7 Binary files /dev/null and b/hyperspy/tests/drawing/plot_model1d/test_default_navigator_plot.png differ diff --git a/hyperspy/tests/drawing/plot_model1d/test_default_signal_plot.png b/hyperspy/tests/drawing/plot_model1d/test_default_signal_plot.png new file mode 100644 index 0000000000..965359057e Binary files /dev/null and b/hyperspy/tests/drawing/plot_model1d/test_default_signal_plot.png differ diff --git a/hyperspy/tests/drawing/plot_model1d/test_disable_plot_components.png b/hyperspy/tests/drawing/plot_model1d/test_disable_plot_components.png new file mode 100644 index 0000000000..965359057e Binary files /dev/null and b/hyperspy/tests/drawing/plot_model1d/test_disable_plot_components.png differ diff --git a/hyperspy/tests/drawing/plot_model1d/test_plot_components.png b/hyperspy/tests/drawing/plot_model1d/test_plot_components.png new file mode 100644 index 0000000000..e65b8e7d43 Binary files /dev/null and b/hyperspy/tests/drawing/plot_model1d/test_plot_components.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_circle_roi_navigation.png b/hyperspy/tests/drawing/plot_roi/test_plot_circle_roi_navigation.png new file mode 100644 index 0000000000..f73e8b87c1 Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_circle_roi_navigation.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_circle_roi_signal.png b/hyperspy/tests/drawing/plot_roi/test_plot_circle_roi_signal.png new file mode 100644 index 0000000000..abdca14fc3 Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_circle_roi_signal.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_line2d_roi_navigation.png b/hyperspy/tests/drawing/plot_roi/test_plot_line2d_roi_navigation.png new file mode 100644 index 0000000000..c66b0afed3 Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_line2d_roi_navigation.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_line2d_roi_signal.png b/hyperspy/tests/drawing/plot_roi/test_plot_line2d_roi_signal.png new file mode 100644 index 0000000000..893023439d Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_line2d_roi_signal.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_point1D_axis_0.png b/hyperspy/tests/drawing/plot_roi/test_plot_point1D_axis_0.png new file mode 100644 index 0000000000..a4af7d6c60 Binary files /dev/null and 
b/hyperspy/tests/drawing/plot_roi/test_plot_point1D_axis_0.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_point1D_axis_1.png b/hyperspy/tests/drawing/plot_roi/test_plot_point1D_axis_1.png new file mode 100644 index 0000000000..e6af739af4 Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_point1D_axis_1.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_point1D_axis_2.png b/hyperspy/tests/drawing/plot_roi/test_plot_point1D_axis_2.png new file mode 100644 index 0000000000..ee4a91c1ee Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_point1D_axis_2.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_point2D_navigation.png b/hyperspy/tests/drawing/plot_roi/test_plot_point2D_navigation.png new file mode 100644 index 0000000000..ba9f456dd5 Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_point2D_navigation.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_point2D_signal.png b/hyperspy/tests/drawing/plot_roi/test_plot_point2D_signal.png new file mode 100644 index 0000000000..ee7160d0bc Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_point2D_signal.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_rectangular_roi_navigation.png b/hyperspy/tests/drawing/plot_roi/test_plot_rectangular_roi_navigation.png new file mode 100644 index 0000000000..d541a4e987 Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_rectangular_roi_navigation.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_rectangular_roi_signal.png b/hyperspy/tests/drawing/plot_roi/test_plot_rectangular_roi_signal.png new file mode 100644 index 0000000000..ece4b4e0a6 Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_rectangular_roi_signal.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_spanroi_axis_0.png b/hyperspy/tests/drawing/plot_roi/test_plot_spanroi_axis_0.png new file mode 100644 index 0000000000..b1cee2bc3d Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_spanroi_axis_0.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_spanroi_axis_1.png b/hyperspy/tests/drawing/plot_roi/test_plot_spanroi_axis_1.png new file mode 100644 index 0000000000..ee0a7aa699 Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_spanroi_axis_1.png differ diff --git a/hyperspy/tests/drawing/plot_roi/test_plot_spanroi_axis_2.png b/hyperspy/tests/drawing/plot_roi/test_plot_spanroi_axis_2.png new file mode 100644 index 0000000000..e6b69ee289 Binary files /dev/null and b/hyperspy/tests/drawing/plot_roi/test_plot_spanroi_axis_2.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_data_changed_event_1.png b/hyperspy/tests/drawing/plot_signal/test_plot_data_changed_event_1.png index 2d4609fff5..8585955d90 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_data_changed_event_1.png and b/hyperspy/tests/drawing/plot_signal/test_plot_data_changed_event_1.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-complex_imag.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-complex_imag.png index 18abbdbd17..ef13834020 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-complex_imag.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-complex_imag.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-complex_real.png 
b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-complex_real.png index 18abbdbd17..ef13834020 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-complex_real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-complex_real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-real.png index cdd1474299..331a720d2f 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-1-sig-real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-complex_imag.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-complex_imag.png index 68c2b0a512..f3cda1ba7f 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-complex_imag.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-complex_imag.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-complex_real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-complex_real.png index 68c2b0a512..f3cda1ba7f 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-complex_real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-complex_real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-real.png index 72e53cfd45..a4cc6a812b 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_0-2-sig-real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-complex_imag.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-complex_imag.png index 094ee885fe..ec2109c04a 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-complex_imag.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-complex_imag.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-complex_real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-complex_real.png index 094ee885fe..ec2109c04a 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-complex_real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-complex_real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-real.png index 70be6ee859..a4b133d72d 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-nav-real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-complex_imag.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-complex_imag.png index 9ccb1f5aed..bb5d51e908 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-complex_imag.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-complex_imag.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-complex_real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-complex_real.png index 9ccb1f5aed..bb5d51e908 100644 Binary files 
a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-complex_real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-complex_real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-real.png index 0d785c5422..2397deb19c 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-1-sig-real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-complex_imag.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-complex_imag.png index 752f12dce7..9f8667dd53 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-complex_imag.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-complex_imag.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-complex_real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-complex_real.png index 752f12dce7..2612af9ffd 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-complex_real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-complex_real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-real.png index a43c139405..8e4b01482c 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-nav-real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-complex_imag.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-complex_imag.png index ea12c97321..03afc43854 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-complex_imag.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-complex_imag.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-complex_real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-complex_real.png index ea12c97321..03afc43854 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-complex_real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-complex_real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-real.png index 50bf0948e8..a3d72b60bf 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_1-2-sig-real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-complex_imag.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-complex_imag.png index 4b11aacc3b..206699426e 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-complex_imag.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-complex_imag.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-complex_real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-complex_real.png index 4b11aacc3b..206699426e 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-complex_real.png and 
b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-complex_real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-real.png index d8bc0502bb..748fde6647 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-nav-real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-complex_imag.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-complex_imag.png index 0a5a5aa1f7..ea461140d9 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-complex_imag.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-complex_imag.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-complex_real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-complex_real.png index 0a5a5aa1f7..ea461140d9 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-complex_real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-complex_real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-real.png index e2cd622348..70e8721c59 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-1-sig-real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-complex_imag.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-complex_imag.png index fd022ca848..7a3541ce5b 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-complex_imag.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-complex_imag.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-complex_real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-complex_real.png index fd022ca848..7a3541ce5b 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-complex_real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-complex_real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-real.png index ab091d2383..92b6ad0878 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-nav-real.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-complex_imag.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-complex_imag.png index 1b24055f5a..90ed334282 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-complex_imag.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-complex_imag.png differ diff --git a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-complex_real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-complex_real.png index 1b24055f5a..90ed334282 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-complex_real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-complex_real.png differ diff --git 
a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-real.png b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-real.png index b355c97669..cf8db130d6 100644 Binary files a/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-real.png and b/hyperspy/tests/drawing/plot_signal/test_plot_sig_nav_2-2-sig-real.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_cascade.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_cascade.png index b74eed5919..3796f5e381 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_cascade.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_cascade.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_default.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_default.png index 6fd47ac0f8..4294eb1e4a 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_default.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_default.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_heatmap.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_heatmap.png index 9818cf5683..01be225513 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_heatmap.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_heatmap.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_mosaic.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_mosaic.png index 1c5c483035..dba40dcbd2 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_mosaic.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_mosaic.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_overlap.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_overlap.png index 6fd47ac0f8..4294eb1e4a 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_overlap.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_overlap.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_1nav.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_1nav.png index 1fba4a602c..2d1371c74a 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_1nav.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_1nav.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_1sig.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_1sig.png index a0cc5e3466..26724204fc 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_1sig.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_1sig.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_2nav.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_2nav.png index 57353cf951..7768203141 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_2nav.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_2nav.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_2sig.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_2sig.png index 57353cf951..7768203141 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_2sig.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_spectra_sync_2sig.png differ diff --git 
a/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_1-nav.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_1-nav.png index 4a330ec07d..9b28e3729f 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_1-nav.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_1-nav.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_1-sig.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_1-sig.png index e7b7a33334..6ac59569a1 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_1-sig.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_1-sig.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_2-nav.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_2-nav.png index cf68b48261..10dc3e7a06 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_2-nav.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_2-nav.png differ diff --git a/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_2-sig.png b/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_2-sig.png index 4a1877df46..db08b18b28 100644 Binary files a/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_2-sig.png and b/hyperspy/tests/drawing/plot_signal1d/test_plot_two_cursors_2-sig.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-False-False.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-False-False.png index fcb1ff925c..2feae7ab6a 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-False-False.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-False-False.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-False-True.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-False-True.png index d137d83d35..e049c8b438 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-False-True.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-False-True.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-True-False.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-True-False.png index b1fb73aad8..99da74c8dd 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-True-False.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-True-False.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-True-True.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-True-True.png index 1092812b86..21d52a2cab 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-True-True.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-False-True-True.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-False-False.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-False-False.png index 6a56000e85..68c934d232 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-False-False.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-False-False.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-False-True.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-False-True.png index 18f418b097..a4098c9049 100644 Binary files 
a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-False-True.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-False-True.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-True-False.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-True-False.png index 7f628778b6..f55fc3fc55 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-True-False.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-True-False.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-True-True.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-True-True.png index 21a757e7e8..4c2ef93a25 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-True-True.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_False-True-True-True.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-False-False.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-False-False.png index 59e302113c..aed5ab8759 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-False-False.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-False-False.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-False-True.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-False-True.png index d269f04c24..7abe846d1e 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-False-True.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-False-True.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-True-False.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-True-False.png index 6bd1ffb650..8f4f68cc04 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-True-False.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-True-False.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-True-True.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-True-True.png index 142d6d19b1..324ab8fc2a 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-True-True.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-False-True-True.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-False-False.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-False-False.png index 72e53cfd45..e9cfdd32b2 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-False-False.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-False-False.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-False-True.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-False-True.png index fb2124dd98..0eb8eff867 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-False-True.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-False-True.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-True-False.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-True-False.png index a6172b2b1a..775a90e475 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-True-False.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-True-False.png differ diff --git 
a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-True-True.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-True-True.png index 562173df3d..c159676761 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-True-True.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_True-True-True-True.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_list.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_list.png new file mode 100644 index 0000000000..ddfb75f94c Binary files /dev/null and b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_list.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_list_w_diverging.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_list_w_diverging.png new file mode 100644 index 0000000000..989474372b Binary files /dev/null and b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_list_w_diverging.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_make_cmap_bitfalse.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_make_cmap_bitfalse.png new file mode 100644 index 0000000000..de62aaaecb Binary files /dev/null and b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_make_cmap_bitfalse.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_make_cmap_bittrue.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_make_cmap_bittrue.png new file mode 100644 index 0000000000..d103d459e5 Binary files /dev/null and b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_make_cmap_bittrue.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_mpl_colors.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_mpl_colors.png new file mode 100644 index 0000000000..4b17eef397 Binary files /dev/null and b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_mpl_colors.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_multi_signal.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_multi_signal.png new file mode 100644 index 0000000000..7d8a696848 Binary files /dev/null and b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_multi_signal.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_multi_w_rgb.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_multi_w_rgb.png new file mode 100644 index 0000000000..81429434b1 Binary files /dev/null and b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_multi_w_rgb.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_one_string.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_one_string.png new file mode 100644 index 0000000000..7703a37aa9 Binary files /dev/null and b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_cmap_one_string.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_images_default.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_default.png new file mode 100644 index 0000000000..258bfe6582 Binary files /dev/null and b/hyperspy/tests/drawing/plot_signal2d/test_plot_images_default.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_multiple_images_list_None-None.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_multiple_images_list_None-None.png index 54b5ff33d9..5ad96ed0d0 100644 Binary files 
a/hyperspy/tests/drawing/plot_signal2d/test_plot_multiple_images_list_None-None.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_multiple_images_list_None-None.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_plot_multiple_images_list_vmin0-vmax0.png b/hyperspy/tests/drawing/plot_signal2d/test_plot_multiple_images_list_vmin0-vmax0.png index d7e36d4876..890a7ad03b 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_plot_multiple_images_list_vmin0-vmax0.png and b/hyperspy/tests/drawing/plot_signal2d/test_plot_multiple_images_list_vmin0-vmax0.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_rgb_image_global.png b/hyperspy/tests/drawing/plot_signal2d/test_rgb_image_global.png index 4b425848b4..bfe599e3d5 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_rgb_image_global.png and b/hyperspy/tests/drawing/plot_signal2d/test_rgb_image_global.png differ diff --git a/hyperspy/tests/drawing/plot_signal2d/test_rgb_image_single.png b/hyperspy/tests/drawing/plot_signal2d/test_rgb_image_single.png index 000fcf762e..1ff8b2c8de 100644 Binary files a/hyperspy/tests/drawing/plot_signal2d/test_rgb_image_single.png and b/hyperspy/tests/drawing/plot_signal2d/test_rgb_image_single.png differ diff --git a/hyperspy/tests/drawing/plot_signal_tools/test_plot_BackgroundRemoval.png b/hyperspy/tests/drawing/plot_signal_tools/test_plot_BackgroundRemoval.png new file mode 100644 index 0000000000..1d9c52623b Binary files /dev/null and b/hyperspy/tests/drawing/plot_signal_tools/test_plot_BackgroundRemoval.png differ diff --git a/hyperspy/tests/drawing/plot_widgets/test_plot_ModifiableSpanSelector.png b/hyperspy/tests/drawing/plot_widgets/test_plot_ModifiableSpanSelector.png new file mode 100644 index 0000000000..bb087cc2df Binary files /dev/null and b/hyperspy/tests/drawing/plot_widgets/test_plot_ModifiableSpanSelector.png differ diff --git a/hyperspy/tests/drawing/plot_widgets/test_plot_range.png b/hyperspy/tests/drawing/plot_widgets/test_plot_range.png new file mode 100644 index 0000000000..c229bf8b1e Binary files /dev/null and b/hyperspy/tests/drawing/plot_widgets/test_plot_range.png differ diff --git a/hyperspy/tests/drawing/plot_widgets/test_plot_range_Signal2D.png b/hyperspy/tests/drawing/plot_widgets/test_plot_range_Signal2D.png new file mode 100644 index 0000000000..d7cc70e063 Binary files /dev/null and b/hyperspy/tests/drawing/plot_widgets/test_plot_range_Signal2D.png differ diff --git a/hyperspy/tests/drawing/test_mpl_testing_setup.py b/hyperspy/tests/drawing/test_mpl_testing_setup.py index eeef3dd160..f7be024467 100644 --- a/hyperspy/tests/drawing/test_mpl_testing_setup.py +++ b/hyperspy/tests/drawing/test_mpl_testing_setup.py @@ -52,6 +52,6 @@ def test_plotting_test_working(mpl_cleanup): ax = fig.add_subplot(1, 1, 1) ax.plot([1, 2, 2]) # to generate a different plot uncomment the next line -# ax.plot([1, 2, 3, 4]) # Uncomment this line to make sure the test is -# properly failing + # ax.plot([1, 2, 3, 4]) # Uncomment this line to make sure the test is + # properly failing return fig diff --git a/hyperspy/tests/drawing/test_plot_markers.py b/hyperspy/tests/drawing/test_plot_markers.py index 1a66a56857..bf8bee7940 100644 --- a/hyperspy/tests/drawing/test_plot_markers.py +++ b/hyperspy/tests/drawing/test_plot_markers.py @@ -19,7 +19,7 @@ import pytest from hyperspy.misc.test_utils import update_close_figure, sanitize_dict -from hyperspy.signals import Signal2D, Signal1D +from hyperspy.signals import Signal2D, Signal1D, BaseSignal from 
hyperspy.utils import markers, stack from hyperspy.drawing.marker import dict2marker from hyperspy.datasets.example_signals import EDS_TEM_Spectrum @@ -469,11 +469,47 @@ def test_plot_line_markers_close(): baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) def test_plot_eds_lines(): a = EDS_TEM_Spectrum() - s = stack([a, a*5]) + s = stack([a, a * 5]) s.plot(True) s.axes_manager.navigation_axes[0].index = 1 return s._plot.signal_plot.figure +def test_iterate_markers(): + from skimage.feature import peak_local_max + import scipy.misc + ims = BaseSignal(scipy.misc.face()).as_signal2D([1,2]) + index = np.array([peak_local_max(im.data, min_distance=100, + num_peaks=4) for im in ims]) + # Add multiple markers + for i in range(4): + xs = index[:, i, 1] + ys = index[:, i, 0] + m = markers.point(x=xs, y=ys, color='red') + ims.add_marker(m, plot_marker=True, permanent=True) + m = markers.text(x=10+xs, y=10+ys, text=str(i), color='k') + ims.add_marker(m, plot_marker=True, permanent=True) + xs = index[:, :, 1] + ys = index[:, :, 0] + m = markers.rectangle(np.min(xs, 1), + np.min(ys, 1), + np.max(xs, 1), + np.max(ys, 1), + color='green') + ims.add_marker(m, plot_marker=True, permanent=True) + + for im in ims: + m_original = ims.metadata.Markers + m_iterated = im.metadata.Markers + for key in m_original.keys(): + mo = m_original[key] + mi = m_iterated[key] + assert mo.__class__.__name__ == mi.__class__.__name__ + assert mo.name == mi.name + assert mo.get_data_position('x1') == mi.get_data_position('x1') + assert mo.get_data_position('y1') == mi.get_data_position('y1') + assert mo.get_data_position('text') == mi.get_data_position('text') + assert mo.marker_properties['color'] == \ + mi.marker_properties['color'] @update_close_figure def test_plot_eds_markers_close(): diff --git a/hyperspy/tests/drawing/test_plot_model.py b/hyperspy/tests/drawing/test_plot_model.py index 1612904e08..e7f2da98c6 100644 --- a/hyperspy/tests/drawing/test_plot_model.py +++ b/hyperspy/tests/drawing/test_plot_model.py @@ -53,16 +53,16 @@ def create_ll_signal(signal_shape=1000): def create_sum_of_gaussians(convolved=False): param1 = {'A': A_value_gaussian[0], - 'centre': centre_value_gaussian[0]/scale, - 'sigma': sigma_value_gaussian[0]/scale} + 'centre': centre_value_gaussian[0] / scale, + 'sigma': sigma_value_gaussian[0] / scale} gs1 = Gaussian(**param1) param2 = {'A': A_value_gaussian[1], - 'centre': centre_value_gaussian[1]/scale, - 'sigma': sigma_value_gaussian[1]/scale} + 'centre': centre_value_gaussian[1] / scale, + 'sigma': sigma_value_gaussian[1] / scale} gs2 = Gaussian(**param2) param3 = {'A': A_value_gaussian[2], - 'centre': centre_value_gaussian[2]/scale, - 'sigma': sigma_value_gaussian[2]/scale} + 'centre': centre_value_gaussian[2] / scale, + 'sigma': sigma_value_gaussian[2] / scale} gs3 = Gaussian(**param3) axis = np.arange(1000) diff --git a/hyperspy/tests/drawing/test_plot_model1d.py b/hyperspy/tests/drawing/test_plot_model1d.py new file mode 100644 index 0000000000..60cda2d35a --- /dev/null +++ b/hyperspy/tests/drawing/test_plot_model1d.py @@ -0,0 +1,66 @@ +# Copyright 2007-2016 The HyperSpy developers +# +# This file is part of HyperSpy. +# +# HyperSpy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# HyperSpy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. + +import numpy as np +import pytest + +from hyperspy.signals import Signal1D +from hyperspy.components1d import Expression + +DEFAULT_TOL = 2.0 +BASELINE_DIR = 'plot_model1d' +STYLE_PYTEST_MPL = 'default' + + +class TestModelPlot: + def setup_method(self, method): + s = Signal1D(np.arange(1000).reshape((10, 100))) + np.random.seed(0) + s.add_poissonian_noise() + m = s.create_model() + line = Expression("a * x", name="line", a=1) + m.append(line) + self.m = m + + @pytest.mark.mpl_image_compare( + baseline_dir=BASELINE_DIR, tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_default_signal_plot(self): + self.m.plot() + return self.m._plot.signal_plot.figure + + @pytest.mark.mpl_image_compare( + baseline_dir=BASELINE_DIR, tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_plot_components(self): + self.m.plot(plot_components=True) + return self.m._plot.signal_plot.figure + + @pytest.mark.mpl_image_compare( + baseline_dir=BASELINE_DIR, tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_disable_plot_components(self): + self.m.plot(plot_components=True) + self.m.disable_plot_components() + return self.m._plot.signal_plot.figure + + @pytest.mark.mpl_image_compare( + baseline_dir=BASELINE_DIR, tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_default_navigator_plot(self): + self.m.plot() + return self.m._plot.navigator_plot.figure + + def test_no_navigator(self): + self.m.plot(navigator=None) + assert self.m.signal._plot.navigator_plot is None diff --git a/hyperspy/tests/drawing/test_plot_roi_widgets.py b/hyperspy/tests/drawing/test_plot_roi_widgets.py new file mode 100644 index 0000000000..472bc1b285 --- /dev/null +++ b/hyperspy/tests/drawing/test_plot_roi_widgets.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +# Copyright 2007-2016 The HyperSpy developers +# +# This file is part of HyperSpy. +# +# HyperSpy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HyperSpy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
+ +import numpy as np +import numpy.testing as nt +import pytest +import matplotlib + +from hyperspy.signals import Signal2D, Signal1D +from hyperspy.utils import roi + + +BASELINE_DIR = 'plot_roi' +DEFAULT_TOL = 2.0 +STYLE_PYTEST_MPL = 'default' + + +def _transpose_space(space, im): + if space == "signal": + im = im + axes = im.axes_manager.signal_axes + im.plot() + figure = im._plot.signal_plot.figure + else: + im = im.T + axes = im.axes_manager.navigation_axes + im.plot() + figure = im._plot.navigator_plot.figure + return { + "im": im, + "figure": figure, + "axes": axes, + } + + +class TestPlotROI(): + + def setup_method(self, method): + # Create test image 100x100 pixels: + self.im = Signal2D(np.arange(50000).reshape([10, 50, 100])) + self.im.axes_manager[0].scale = 1e-1 + self.im.axes_manager[1].scale = 1e-2 + self.im.axes_manager[2].scale = 1e-3 + + @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, + tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_plot_point1D_axis_0(self, mpl_cleanup): + self.im.plot() + p = roi.Point1DROI(0.5) + p.add_widget(signal=self.im, axes=[0, ], color="cyan") + return self.im._plot.navigator_plot.figure + + @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, + tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_plot_point1D_axis_1(self, mpl_cleanup): + self.im.plot() + p = roi.Point1DROI(0.05) + p.add_widget(signal=self.im, axes=[1, ], color="cyan") + return self.im._plot.signal_plot.figure + + @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, + tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_plot_point1D_axis_2(self, mpl_cleanup): + self.im.plot() + p = roi.Point1DROI(0.005) + p.add_widget(signal=self.im, axes=[2, ], color="cyan") + return self.im._plot.signal_plot.figure + + @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, + tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_plot_spanroi_axis_0(self, mpl_cleanup): + self.im.plot() + p = roi.SpanROI(0.5, 0.7) + p.add_widget(signal=self.im, axes=[0, ], color="cyan") + return self.im._plot.navigator_plot.figure + + @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, + tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_plot_spanroi_axis_1(self, mpl_cleanup): + self.im.plot() + p = roi.SpanROI(0.05, 0.07) + p.add_widget(signal=self.im, axes=[1, ], color="cyan") + return self.im._plot.signal_plot.figure + + @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, + tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_plot_spanroi_axis_2(self, mpl_cleanup): + self.im.plot() + p = roi.SpanROI(0.005, 0.007) + p.add_widget(signal=self.im, axes=[2, ], color="cyan") + return self.im._plot.signal_plot.figure + + @pytest.mark.parametrize("space", ("signal", "navigation")) + @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, + tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_plot_point2D(self, mpl_cleanup, space): + objs = _transpose_space(im=self.im, space=space) + p = roi.Point2DROI(0.05, 0.01) + p.add_widget(signal=objs["im"], axes=objs["axes"], color="cyan") + return objs["figure"] + + @pytest.mark.parametrize("space", ("signal", "navigation")) + @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, + tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_plot_circle_roi(self, mpl_cleanup, space): + self.im.axes_manager[2].scale = 0.01 + objs = _transpose_space(im=self.im, space=space) + p = roi.CircleROI(cx=0.1, cy=0.1, r=0.1) + p.add_widget(signal=objs["im"], axes=objs["axes"], color="cyan") + return 
objs["figure"] + + @pytest.mark.parametrize("space", ("signal", "navigation")) + @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, + tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_plot_rectangular_roi(self, mpl_cleanup, space): + objs = _transpose_space(im=self.im, space=space) + p = roi.RectangularROI(left=0.01, top=0.01, right=0.1, bottom=0.03) + p.add_widget(signal=objs["im"], axes=objs["axes"], color="cyan") + return objs["figure"] + + @pytest.mark.parametrize("space", ("signal", "navigation")) + @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, + tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) + def test_plot_line2d_roi(self, mpl_cleanup, space): + objs = _transpose_space(im=self.im, space=space) + p = roi.Line2DROI(x1=0.01, y1=0.01, x2=0.1, y2=0.03) + p.add_widget(signal=objs["im"], axes=objs["axes"], color="cyan") + return objs["figure"] diff --git a/hyperspy/tests/drawing/test_plot_signal.py b/hyperspy/tests/drawing/test_plot_signal.py index 3799417cdd..cf136457d9 100644 --- a/hyperspy/tests/drawing/test_plot_signal.py +++ b/hyperspy/tests/drawing/test_plot_signal.py @@ -47,14 +47,14 @@ def __init__(self, ndim, sdim, data_type='real'): s = hs.signals.__dict__['%sSignal%iD' % (dtype, sdim)](data) if sdim == 1: s.axes_manager = self._set_signal_axes(s.axes_manager, name='Energy', - units='eV', scale=500.0, offset=300.0) + units='keV', scale=.5, offset=0.3) elif sdim == 2: s.axes_manager = self._set_signal_axes(s.axes_manager, name='Reciprocal distance', units='1/nm', scale=1, offset=0.0) if ndim > 0: s.axes_manager = self._set_navigation_axes(s.axes_manager, name='', - units='m', scale=1E-6, - offset=5E-6) + units='nm', scale=1.0, + offset=5.0) s.metadata.General.title = title # workaround to be able to access the figure in case of complex 2d # signals diff --git a/hyperspy/tests/drawing/test_plot_signal1d.py b/hyperspy/tests/drawing/test_plot_signal1d.py index 2be1ca4ed6..794ec147fc 100644 --- a/hyperspy/tests/drawing/test_plot_signal1d.py +++ b/hyperspy/tests/drawing/test_plot_signal1d.py @@ -15,6 +15,7 @@ # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see . 
+import numpy as np import scipy.misc import pytest import matplotlib.pyplot as plt @@ -141,6 +142,25 @@ def test_plot_spectra_sync(self, mpl_cleanup, figure): if figure == '2sig': return s2._plot.navigator_plot.figure + def test_plot_spectra_legend_pick(self, mpl_cleanup): + x = np.linspace(0., 2., 512) + n = np.arange(1, 5) + x_pow_n = x[None, :]**n[:, None] + s = hs.signals.Signal1D(x_pow_n) + my_legend = [r'x^' + str(io) for io in n] + f = plt.figure() + ax = hs.plot.plot_spectra(s, legend=my_legend, fig=f) + leg = ax.get_legend() + leg_artists = leg.get_lines() + click = plt.matplotlib.backend_bases.MouseEvent( + 'button_press_event', f.canvas, 0, 0, 'left') + for artist, li in zip(leg_artists, ax.lines[::-1]): + plt.matplotlib.backends.backend_agg.FigureCanvasBase.pick_event( + f.canvas, click, artist) + assert not li.get_visible() + plt.matplotlib.backends.backend_agg.FigureCanvasBase.pick_event( + f.canvas, click, artist) + @update_close_figure def test_plot_nav0_close(): diff --git a/hyperspy/tests/drawing/test_plot_signal2d.py b/hyperspy/tests/drawing/test_plot_signal2d.py index e7f3749dc1..35bdc276d4 100644 --- a/hyperspy/tests/drawing/test_plot_signal2d.py +++ b/hyperspy/tests/drawing/test_plot_signal2d.py @@ -24,7 +24,7 @@ import hyperspy.api as hs from hyperspy.drawing.utils import plot_RGB_map from hyperspy.tests.drawing.test_plot_signal import _TestPlot - +from hyperspy.drawing.utils import make_cmap scalebar_color = 'blue' default_tol = 2.0 @@ -148,11 +148,198 @@ def test_plot_multiple_images_list(mpl_cleanup, vmin, vmax): axesRGB[1].units = "nm" hs.plot.plot_images([image0, image1, image2, rgb], tight_layout=True, - # colorbar='single', labelwrap=20, vmin=vmin, vmax=vmax) return plt.gcf() +class _TestIteratedSignal: + + def __init__(self): + s = hs.signals.Signal2D([scipy.misc.ascent()] * 6) + angles = hs.signals.BaseSignal(range(00, 60, 10)) + s.map(scipy.ndimage.rotate, angle=angles.T, reshape=False) + # prevent values outside of integer range + s.data = np.clip(s.data, 0, 255) + title = 'Ascent' + + s.axes_manager = self._set_signal_axes(s.axes_manager, + name='spatial', + units='nm', scale=1, + offset=0.0) + s.axes_manager = self._set_navigation_axes(s.axes_manager, + name='index', + units='images', + scale=1, offset=0) + s.metadata.General.title = title + + self.signal = s + + def _set_navigation_axes(self, axes_manager, name=t.Undefined, + units=t.Undefined, scale=1.0, offset=0.0): + for nav_axis in axes_manager.navigation_axes: + nav_axis.units = units + nav_axis.scale = scale + nav_axis.offset = offset + return axes_manager + + def _set_signal_axes(self, axes_manager, name=t.Undefined, + units=t.Undefined, scale=1.0, offset=0.0): + for sig_axis in axes_manager.signal_axes: + sig_axis.name = name + sig_axis.units = units + sig_axis.scale = scale + sig_axis.offset = offset + return axes_manager + + +@pytest.mark.mpl_image_compare( + baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) +def test_plot_images_default(mpl_cleanup): + test_im_plot = _TestIteratedSignal() + hs.plot.plot_images(test_im_plot.signal) + return plt.gcf() + + +@pytest.mark.mpl_image_compare( + baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) +def test_plot_images_cmap_list(mpl_cleanup): + test_im_plot = _TestIteratedSignal() + hs.plot.plot_images(test_im_plot.signal, + axes_decor='off', + cmap=['viridis', 'gray']) + return plt.gcf() + + +@pytest.mark.mpl_image_compare( + baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) +def 
test_plot_images_cmap_list_w_diverging(mpl_cleanup): + test_im_plot = _TestIteratedSignal() + hs.plot.plot_images(test_im_plot.signal, + axes_decor='off', + cmap=['viridis', 'gray', 'RdBu_r']) + return plt.gcf() + + +@pytest.mark.mpl_image_compare( + baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) +def test_plot_images_cmap_mpl_colors(mpl_cleanup): + test_im_plot = _TestIteratedSignal() + hs.plot.plot_images(test_im_plot.signal, + axes_decor='off', + cmap='mpl_colors') + return plt.gcf() + + +def test_plot_images_cmap_mpl_colors_w_single_cbar(): + # This should give an error, so test for that + test_im_plot = _TestIteratedSignal() + with pytest.raises(ValueError) as val_error: + hs.plot.plot_images(test_im_plot.signal, + axes_decor='off', + cmap='mpl_colors', + colorbar='single') + assert str(val_error.value) == 'Cannot use a single colorbar with ' \ + 'multiple colormaps. Please check for ' \ + 'compatible arguments.' + + +def test_plot_images_bogus_cmap(): + # This should give an error, so test for that + test_im_plot = _TestIteratedSignal() + with pytest.raises(ValueError) as val_error: + hs.plot.plot_images(test_im_plot.signal, + axes_decor='off', + cmap=3.14159265359, + colorbar=None) + assert str(val_error.value) == 'The provided cmap value was not ' \ + 'understood. Please check input values.' + + +@pytest.mark.mpl_image_compare( + baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) +def test_plot_images_cmap_one_string(mpl_cleanup): + test_im_plot = _TestIteratedSignal() + hs.plot.plot_images(test_im_plot.signal, + axes_decor='off', + cmap='RdBu_r', + colorbar='single') + return plt.gcf() + + +@pytest.mark.mpl_image_compare( + baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) +def test_plot_images_cmap_make_cmap_bittrue(mpl_cleanup): + test_im_plot = _TestIteratedSignal() + hs.plot.plot_images(test_im_plot.signal, + axes_decor='off', + cmap=make_cmap([(255, 255, 255), + '#F5B0CB', + (220, 106, 207), + '#745C97', + (57, 55, 91)], + bit=True, + name='test_cmap', + register=True)) + return plt.gcf() + + +@pytest.mark.mpl_image_compare( + baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) +def test_plot_images_cmap_make_cmap_bitfalse(mpl_cleanup): + test_im_plot = _TestIteratedSignal() + hs.plot.plot_images(test_im_plot.signal, + axes_decor='off', + cmap=make_cmap([(1, 1, 1), + '#F5B0CB', + (0.86, 0.42, 0.81), + '#745C97', + (0.22, 0.22, 0.36)], + bit=False, + name='test_cmap', + register=True)) + return plt.gcf() + + +@pytest.mark.mpl_image_compare( + baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) +def test_plot_images_cmap_multi_signal(mpl_cleanup): + test_plot1 = _TestIteratedSignal() + + test_plot2 = _TestIteratedSignal() + test_plot2.signal *= 2 # change scale of second signal + test_plot2.signal = test_plot2.signal.inav[::-1] + test_plot2.signal.metadata.General.title = 'Descent' + + hs.plot.plot_images([test_plot1.signal, + test_plot2.signal], + axes_decor='off', + per_row=4, + cmap='mpl_colors') + return plt.gcf() + + +@pytest.mark.mpl_image_compare( + baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) +def test_plot_images_cmap_multi_w_rgb(mpl_cleanup): + test_plot1 = _TestIteratedSignal() + + test_plot2 = _TestIteratedSignal() + test_plot2.signal *= 2 # change scale of second signal + test_plot2.signal.metadata.General.title = 'Ascent-2' + + rgb_sig = hs.signals.Signal1D(scipy.misc.face()) + rgb_sig.change_dtype('rgb8') + 
rgb_sig.metadata.General.title = 'Racoon!' + + hs.plot.plot_images([test_plot1.signal, + test_plot2.signal, + rgb_sig], + axes_decor='off', + per_row=4, + cmap='mpl_colors') + return plt.gcf() + + @pytest.mark.mpl_image_compare( baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) def test_plot_images_single_image(mpl_cleanup): @@ -162,6 +349,7 @@ def test_plot_images_single_image(mpl_cleanup): ax = hs.plot.plot_images(image0, saturated_pixels=0.1) return plt.gcf() + @pytest.mark.mpl_image_compare( baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) def test_plot_images_single_image_stack(mpl_cleanup): @@ -171,6 +359,31 @@ def test_plot_images_single_image_stack(mpl_cleanup): ax = hs.plot.plot_images(image0, saturated_pixels=0.1) return plt.gcf() + +def test_plot_images_multi_signal_w_axes_replot(mpl_cleanup): + imdata = np.random.rand(3, 5, 5) + imgs = hs.signals.Signal2D(imdata) + img_list = [imgs, imgs.inav[:2], imgs.inav[0]] + subplots = hs.plot.plot_images(img_list, axes_decor=None) + f = plt.gcf() + f.canvas.draw() + f.canvas.flush_events() + + tests = [] + for axi in subplots: + imi = axi.images[0].get_array() + x, y = axi.transData.transform((2, 2)) + # Calling base class method because of backends + plt.matplotlib.backends.backend_agg.FigureCanvasBase.button_press_event( + f.canvas, x, y, 'left', True) + fn = plt.gcf() + tests.append( + np.allclose(imi, fn.axes[0].images[0].get_array().data)) + plt.close(fn) + assert np.alltrue(tests) + return f + + @pytest.mark.parametrize("saturated_pixels", [5.0, [0.0, 20.0, 40.0], [10.0, 20.0], [10.0, None, 20.0]]) @pytest.mark.mpl_image_compare( diff --git a/hyperspy/tests/drawing/test_plot_signal_tools.py b/hyperspy/tests/drawing/test_plot_signal_tools.py new file mode 100644 index 0000000000..fb78d85827 --- /dev/null +++ b/hyperspy/tests/drawing/test_plot_signal_tools.py @@ -0,0 +1,50 @@ +# Copyright 2007-2016 The HyperSpy developers +# +# This file is part of HyperSpy. +# +# HyperSpy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HyperSpy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HyperSpy. If not, see . 
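+# This module checks the plotting of the interactive background-removal tool
+# on a synthetic power-law spectrum; the returned figure is compared against
+# the pytest-mpl baseline images stored in the 'plot_signal_tools' directory
+# when the suite is run with the --mpl flag.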
+ +import numpy as np +import pytest + +from hyperspy import signals, components1d +from hyperspy._signals.signal1d import BackgroundRemoval + + +BASELINE_DIR = "plot_signal_tools" +DEFAULT_TOL = 2.0 +STYLE_PYTEST_MPL = 'default' + + +@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, + tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL) +def test_plot_BackgroundRemoval(): + pl = components1d.PowerLaw() + pl.A.value = 1e10 + pl.r.value = 3 + s = signals.Signal1D(pl.function(np.arange(100, 200))) + s.axes_manager[0].offset = 100 + + br = BackgroundRemoval(s, + background_type='Power Law', + polynomial_order=2, + fast=True, + plot_remainder=True, + show_progressbar=None) + + br.span_selector.set_initial((105, 115)) + br.span_selector.onmove_callback() + + return br.signal._plot.signal_plot.figure + diff --git a/hyperspy/tests/drawing/test_plot_widgets.py b/hyperspy/tests/drawing/test_plot_widgets.py index 3b0ddff4ef..dce492fe28 100644 --- a/hyperspy/tests/drawing/test_plot_widgets.py +++ b/hyperspy/tests/drawing/test_plot_widgets.py @@ -19,8 +19,9 @@ import numpy as np import numpy.testing as nt import pytest +import matplotlib -from hyperspy.signals import Signal2D +from hyperspy.signals import Signal2D, Signal1D from hyperspy.drawing import widgets @@ -48,14 +49,14 @@ def test_init(self): assert self.line2d.position == ([0.0, 0.0], [1.2, 0.0]) nt.assert_allclose(self.line2d.indices[0], np.array([0, 0])) nt.assert_allclose(self.line2d.indices[1], np.array([1, 0])) - nt.assert_allclose(self.line2d.get_centre(), np.array([0.6, 0.])) + nt.assert_allclose(self.line2d.get_centre(), np.array([0.6, 0.])) def test_position(self): self.line2d.position = ([12.0, 60.0], [36.0, 96.0]) assert self.line2d.position == ([12.0, 60.0], [36.0, 96.0]) nt.assert_allclose(self.line2d.indices[0], np.array([10, 50])) nt.assert_allclose(self.line2d.indices[1], np.array([30, 80])) - nt.assert_allclose(self.line2d.get_centre(), np.array([24., 78.])) + nt.assert_allclose(self.line2d.get_centre(), np.array([24., 78.])) def test_position_snap_position(self): self.line2d.snap_position = True @@ -63,24 +64,24 @@ def test_position_snap_position(self): nt.assert_allclose(self.line2d.position, ([12.0, 61.2], [36.0, 96.0])) nt.assert_allclose(self.line2d.indices[0], np.array([10, 51])) nt.assert_allclose(self.line2d.indices[1], np.array([30, 80])) - nt.assert_allclose(self.line2d.get_centre(), np.array([24., 78.6])) + nt.assert_allclose(self.line2d.get_centre(), np.array([24., 78.6])) def test_indices(self): self.line2d.indices = ([10, 50], [30, 80]) nt.assert_allclose(self.line2d.indices[0], np.array([10, 50])) nt.assert_allclose(self.line2d.indices[1], np.array([30, 80])) assert self.line2d.position == ([12.0, 60.0], [36.0, 96.0]) - nt.assert_allclose(self.line2d.get_centre(), np.array([24., 78.])) + nt.assert_allclose(self.line2d.get_centre(), np.array([24., 78.])) def test_length(self): x = 10 - self.line2d.position = ([10.0, 10.0], [10.0+x, 10.0]) + self.line2d.position = ([10.0, 10.0], [10.0 + x, 10.0]) assert self.line2d.get_line_length() == x y = 20 - self.line2d.position = ([20.0, 10.0], [20.0+x, 10+y]) + self.line2d.position = ([20.0, 10.0], [20.0 + x, 10 + y]) nt.assert_almost_equal(self.line2d.get_line_length(), - np.sqrt(x**2+y**2)) + np.sqrt(x**2 + y**2)) def test_change_size(self, mpl_cleanup): # Need to plot the signal to set the mpl axis to the widget @@ -109,7 +110,7 @@ def test_change_size_snap_size(self, mpl_cleanup): assert self.line2d.position == ([12.0, 60.0], [36.0, 96.0]) 
nt.assert_allclose(self.line2d.indices[0], np.array([10, 50])) nt.assert_allclose(self.line2d.indices[1], np.array([30, 80])) - nt.assert_allclose(self.line2d.get_centre(), np.array([24., 78.])) + nt.assert_allclose(self.line2d.get_centre(), np.array([24., 78.])) assert self.line2d.size == np.array([0]) self.line2d.size = [3] @@ -158,3 +159,96 @@ def test_plot_line2d(self, mpl_cleanup): nt.assert_allclose(line2d_snap_all.size[0], 14.4) return self.im._plot.signal_plot.figure + + +class TestPlotRangeWidget(): + + def setup_method(self, method): + self.s = Signal1D(np.arange(50)) + self.s.axes_manager[0].scale = 1.2 + self.range = widgets.RangeWidget(self.s.axes_manager) + + @pytest.mark.mpl_image_compare(baseline_dir=baseline_dir, + tolerance=default_tol, style=style_pytest_mpl) + def test_plot_range(self, mpl_cleanup): + self.s.plot() + self.range.set_mpl_ax(self.s._plot.signal_plot.ax) + assert self.range.ax == self.s._plot.signal_plot.ax + assert self.range.color == 'red' # default color + assert self.range.position == (0.0, ) + assert self.range.size == (1.2, ) + assert self.range.span.rect.get_alpha() == 0.5 + + w = widgets.RangeWidget(self.s.axes_manager, color='blue') + w.set_mpl_ax(self.s._plot.signal_plot.ax) + w.set_ibounds(left=4, width=3) + assert w.color == 'blue' + color_rgba = matplotlib.colors.to_rgba('blue', alpha=0.5) + assert w.span.rect.get_fc() == color_rgba + assert w.span.rect.get_ec() == color_rgba + nt.assert_allclose(w.position[0], 4.8) + nt.assert_allclose(w.size[0], 3.6) + + w2 = widgets.RangeWidget(self.s.axes_manager) + w2.set_mpl_ax(self.s._plot.signal_plot.ax) + assert w2.ax == self.s._plot.signal_plot.ax + + w2.set_bounds(left=24.0, width=12.0) + w2.color = 'green' + assert w2.color == 'green' + w2.alpha = 0.25 + assert w2.alpha == 0.25 + + return self.s._plot.signal_plot.figure + + @pytest.mark.mpl_image_compare(baseline_dir=baseline_dir, + tolerance=default_tol, style=style_pytest_mpl) + def test_plot_range_Signal2D(self, mpl_cleanup): + im = Signal2D(np.arange(10 * 10).reshape((10, 10))) + im.axes_manager[0].scale = 0.1 + im.axes_manager[1].scale = 5 + im.plot() + + range_h = widgets.RangeWidget(im.axes_manager, direction='horizontal') + range_h.set_mpl_ax(im._plot.signal_plot.ax) + + range_v = widgets.RangeWidget(im.axes_manager, direction='vertical', + color='blue') + range_v.axes = (im.axes_manager[1],) + range_v.set_mpl_ax(im._plot.signal_plot.ax) + assert range_v.position == (0.0, ) + assert range_v.size == (5.0, ) + + range_v.set_bounds(left=20.0, width=15.0) + assert range_v.position == (20.0, ) + assert range_v.size == (15.0, ) + + return im._plot.signal_plot.figure + + @pytest.mark.mpl_image_compare(baseline_dir=baseline_dir, + tolerance=default_tol, style=style_pytest_mpl) + def test_plot_ModifiableSpanSelector(self, mpl_cleanup): + self.s.plot() + from hyperspy.drawing._widgets.range import ModifiableSpanSelector + ax = self.s._plot.signal_plot.ax + span_v = ModifiableSpanSelector(ax, direction='vertical') + span_v.set_initial((15, 20)) + assert span_v.range == (15, 20) + + span_v.range = (25, 30) + assert span_v.range == (25, 30) + + span_h = ModifiableSpanSelector(ax, direction='horizontal', + rectprops={'color': 'g', 'alpha': 0.2}) + color_rgba = matplotlib.colors.to_rgba('g', alpha=0.2) + assert span_h.rect.get_fc() == color_rgba + assert span_h.rect.get_ec() == color_rgba + span_h.set_initial((50.4, 55.2)) + nt.assert_allclose(span_h.range[0], 50.4) + nt.assert_allclose(span_h.range[1], 55.2) + + span_h.range = (40, 45) + assert 
span_h.range == (40, 45) + ax.figure.canvas.draw_idle() + + return self.s._plot.signal_plot.figure diff --git a/hyperspy/tests/io/bcf_data/Hitachi_TM3030Plus.bcf b/hyperspy/tests/io/bruker_data/Hitachi_TM3030Plus.bcf similarity index 100% rename from hyperspy/tests/io/bcf_data/Hitachi_TM3030Plus.bcf rename to hyperspy/tests/io/bruker_data/Hitachi_TM3030Plus.bcf diff --git a/hyperspy/tests/io/bcf_data/Nope.bcf b/hyperspy/tests/io/bruker_data/Nope.bcf similarity index 100% rename from hyperspy/tests/io/bcf_data/Nope.bcf rename to hyperspy/tests/io/bruker_data/Nope.bcf diff --git a/hyperspy/tests/io/bcf_data/P45_12bit_packed_8bit.bcf b/hyperspy/tests/io/bruker_data/P45_12bit_packed_8bit.bcf similarity index 100% rename from hyperspy/tests/io/bcf_data/P45_12bit_packed_8bit.bcf rename to hyperspy/tests/io/bruker_data/P45_12bit_packed_8bit.bcf diff --git a/hyperspy/tests/io/bcf_data/P45_16bit.npy b/hyperspy/tests/io/bruker_data/P45_16bit.npy similarity index 100% rename from hyperspy/tests/io/bcf_data/P45_16bit.npy rename to hyperspy/tests/io/bruker_data/P45_16bit.npy diff --git a/hyperspy/tests/io/bcf_data/P45_16bit_ds.npy b/hyperspy/tests/io/bruker_data/P45_16bit_ds.npy similarity index 100% rename from hyperspy/tests/io/bcf_data/P45_16bit_ds.npy rename to hyperspy/tests/io/bruker_data/P45_16bit_ds.npy diff --git a/hyperspy/tests/io/bcf_data/P45_instructively_packed_16bit_compressed.bcf b/hyperspy/tests/io/bruker_data/P45_instructively_packed_16bit_compressed.bcf similarity index 100% rename from hyperspy/tests/io/bcf_data/P45_instructively_packed_16bit_compressed.bcf rename to hyperspy/tests/io/bruker_data/P45_instructively_packed_16bit_compressed.bcf diff --git a/hyperspy/tests/io/bcf_data/P45_the_default_job.bcf b/hyperspy/tests/io/bruker_data/P45_the_default_job.bcf similarity index 100% rename from hyperspy/tests/io/bcf_data/P45_the_default_job.bcf rename to hyperspy/tests/io/bruker_data/P45_the_default_job.bcf diff --git a/hyperspy/tests/io/bcf_data/bcf-edx-ebsd.bcf b/hyperspy/tests/io/bruker_data/bcf-edx-ebsd.bcf similarity index 100% rename from hyperspy/tests/io/bcf_data/bcf-edx-ebsd.bcf rename to hyperspy/tests/io/bruker_data/bcf-edx-ebsd.bcf diff --git a/hyperspy/tests/io/bcf_data/bcf_v2_50x50px.bcf b/hyperspy/tests/io/bruker_data/bcf_v2_50x50px.bcf similarity index 100% rename from hyperspy/tests/io/bcf_data/bcf_v2_50x50px.bcf rename to hyperspy/tests/io/bruker_data/bcf_v2_50x50px.bcf diff --git a/hyperspy/tests/io/bruker_data/bruker_nano.spx b/hyperspy/tests/io/bruker_data/bruker_nano.spx new file mode 100755 index 0000000000..185e91f9cb --- /dev/null +++ b/hyperspy/tests/io/bruker_data/bruker_nano.spx @@ -0,0 +1,285 @@ + + + + + + + this is spectrum decription. What does it mean? 
+ + + + RTHardware + 191 + + 7575 + 7385 + 3 + 96 + 1408 + 8927 + 1E4 + 130000 + -20 + 1 + + + + RTDetector + 3 + 9936 + + SD3pr + 9990 + XFlash 5010 + 4.5E-1 + 2.9E-2 + eJyzcUkt8UmsTC0qtrMB0wYKjiX5ubZKhsZKCiEZmcnZeanFxbZKpq66xkr6UDWGUDXmKEos9ExddY3gioygikxRFJkhm2MMVWKGW4kJVIkFqhK4VfoI9wMAqUU9Ow== + slew AP3.3 + + + + + + + + + 3 + 21 + 5 + -3 + 0 + 1 + 0 + 1 + 1 + 8.5E-1 + -1 + 0.07,0.0058,0.183,0.0078,0.277,0.0058,0.555,0,1.1,0,3.293,0.0064,5.89,0,0,0,0,0, + 0,0.01,0.000801,0.01,0.00298,0.01,0.008902,0.01,0.025,0.010046,0.041098,0.013475,0.04702,0.017302,0.049199,0.019237,0.05,0.02, + 0,0.03,0.00444,0.03,0.01651,0.03,0.049318,0.03,0.1385,0.03023,0.227682,0.047375,0.26049,0.06651,0.27256,0.076185,0.277,0.08, + 0,0.03,0.006283,0.03,0.023364,0.03,0.069793,0.03,0.196,0.030228,0.322207,0.047377,0.368636,0.066512,0.385717,0.076186,0.392,0.08, + 0,0,0.008415,0,0.031291,0,0.093473,0,0.2625,0,0.431527,0.000035,0.493709,0.000073,0.516585,0.000092,0.525,0.0001, + 0,0,0.010836,0,0.040291,0,0.120357,0,0.338,0,0.555643,0.000035,0.635709,0.000073,0.665164,0.000092,0.676,0.0001, + 0,0,0.016687,0,0.062045,0,0.185343,0,0.5205,0,0.855657,0.000035,0.978955,0.000073,1.024313,0.000092,1.041,0.0001, + 0,0,0.020101,0,0.07474,0,0.223266,0,0.627,0,1.030734,0.000035,1.17926,0.000073,1.233899,0.000092,1.254,0.0001, + 0,0,0.023836,0,0.088627,0,0.26475,0,0.7435,0,1.22225,0.000035,1.398373,0.000073,1.463164,0.000092,1.487,0.0001, + 0,0,0.027891,0,0.103707,0,0.309795,0,0.87,0,1.430205,0.000035,1.636293,0.000073,1.712109,0.000092,1.74,0.0001, + 0,0,0.030776,0,0.114435,0,0.341842,0,0.96,0,1.578158,0.000035,1.805565,0.000073,1.889224,0.000092,1.92,0.0001, + 0,0,0.032283,0,0.120037,0,0.358578,0,1.007,0,1.655422,0.000035,1.893963,0.000073,1.981717,0.000092,2.014,0.0001, + 0,0,0.036996,0,0.13756,0,0.410923,0,1.154,0,1.897077,0.000035,2.17044,0.000073,2.271004,0.000092,2.308,0.0001, + 0,0,0.05918,0,0.220049,0,0.657334,0,1.846,0,3.034665,0.000035,3.471952,0.000073,3.63282,0.000092,3.692,0.0001, + 0,0,0.079378,0,0.295146,0,0.881668,0,2.476,0,4.070332,0.000035,4.656854,0.000073,4.872623,0.000092,4.952,0.0001, + 0,0,0.119867,0,0.445699,0,1.331404,0,3.739,0,6.146595,0.000035,7.0323,0.000073,7.358133,0.000092,7.478,0.0001, + 0,0,0.148303,0,0.551433,0,1.647253,0,4.626,0,7.604747,0.000035,8.700567,0.000073,9.103698,0.000092,9.252,0.0001, + 0,0,0.176322,0,0.655616,0,1.958472,0,5.5,0,9.041529,0.000035,10.344384,0.000073,10.823678,0.000092,11,0.0001, + 0,0,0.208381,0,0.774819,0,2.314557,0,6.5,0,10.685443,0.000035,12.225181,0.000073,12.791619,0.000092,13,0.0001, + 0,0,0.24044,0,0.894022,0,2.670643,0,7.5,0,12.329357,0,14.105978,0,14.759561,0,15,0, + 0,0,0.320586,0,1.192029,0,3.560857,0,10,0,16.439142,0,18.80797,0,19.679415,0,20,0, + 0,0,1.60293,0,5.960146,0,17.804287,0,50,0,82.195709,0,94.039856,0,98.397072,0,100,0, + + + + RTESMA + 706 + + 1.5E1 + -1 + -1 + 2.000100008E-3 + 3.5E1 + 4.5E1 + 2 + + 6.666666667E2 + 9 + + + + 82 + 2.3.2018 + + 4096 + -2.38925E-1 + 2.5E-3 + 2.473828662E-4 + 4.349876499E-4 + + Acquired + 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,2,10,9,14,12,29,59,52,83,130,160,178,269,322,380,433,478,557,665,640,620,644,609,641,617,464,434,402,307,270,223,169,134,106,78,51,44,20,20,15,8,3,2,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,6,5,5,6,5,8,8,2,4,6,3,10,2,6,4,4,8,2,2,3,2,4,2,2,9,6,1,6,7,5,3,6,8,3,6,9,18,13,9,11,12,15,11,13,20,20,17,21,20,23,22,19,29,19,18,18,21,25,20,25,26,27,32,19,18,10,13,14,12,15,11,8,7,4,9,7,5,5,5,5,8,7,3,1,2,4,5,3,4,8,6,4,3,4,2,6,5,7,6,4,3,3,8,4,6,1,6,4,4,5,5,7,7,5,6,3,5,4,4,5,5,5,0,8,5,8,10,9,5,7,8,9,8,6,9,10,8,9,11,8,12,6,5,7,8,11,8,13,15,12,8,10,21,21,9,16,15,21,17,22,22,19,15,20,12,15,20,29,25,15,30,27,24,26,25,33,34,26,32,36,33,27,39,44,26,34,29,35,40,35,41,33,34,47,45,46,45,53,51,43,35,42,38,39,38,45,36,35,29,40,28,52,41,26,36,43,41,44,33,53,52,57,69,69,79,83,84,86,99,95,94,99,105,108,120,118,108,122,97,103,99,87,80,82,87,75,86,71,54,50,53,53,41,34,31,46,38,31,29,37,28,34,24,27,21,36,28,31,28,39,36,34,40,43,30,41,40,39,30,41,36,46,31,32,33,40,27,42,23,45,40,37,28,32,23,44,31,31,39,36,36,45,40,48,50,68,57,96,80,86,97,119,121,134,152,168,177,203,212,227,211,232,260,268,306,270,295,322,294,281,265,266,237,278,241,213,190,177,166,163,151,138,115,86,75,82,69,54,48,49,58,44,31,35,26,23,24,22,26,22,20,17,19,16,8,11,15,14,11,15,14,14,22,9,9,11,21,12,11,10,15,9,10,8,11,8,14,12,11,12,7,10,13,11,8,6,13,14,6,10,11,17,10,13,11,15,11,17,14,14,7,13,8,10,6,7,10,10,13,13,13,11,5,12,17,16,4,13,14,15,9,11,11,11,14,11,6,12,20,15,15,21,15,9,12,11,14,14,14,22,19,12,17,19,14,11,17,8,16,15,22,11,11,14,26,11,18,13,12,7,14,17,14,19,11,14,19,9,13,17,9,12,17,19,18,16,18,13,10,8,8,10,10,7,12,19,13,13,16,14,13,14,12,18,12,13,12,16,21,12,11,14,14,11,15,9,15,11,13,15,13,16,9,19,14,10,12,20,7,18,14,12,12,16,13,14,12,12,11,16,11,12,10,12,11,10,13,18,20,9,10,13,9,14,22,7,16,14,18,16,18,18,15,13,8,14,11,18,14,10,10,15,14,12,12,18,11,14,15,14,15,8,15,8,15,11,11,20,12,16,16,12,11,19,14,6,13,19,9,15,12,16,15,17,14,14,13,10,13,16,11,14,11,21,18,9,18,9,12,9,13,8,14,16,11,11,20,15,19,18,14,15,16,10,9,15,22,13,14,20,13,15,15,18,17,17,12,19,11,21,26,17,20,9,9,17,15,17,18,17,18,17,14,19,10,15,23,14,15,9,14,15,16,15,15,14,16,20,17,14,12,14,13,10,9,19,14,9,14,19,7,13,18,21,14,16,17,15,13,28,19,11,14,21,13,18,10,17,12,13,12,18,13,9,11,7,9,26,13,17,17,11,11,26,12,17,15,19,11,16,16,12,16,11,13,19,14,17,21,14,10,10,22,13,17,7,19,18,18,11,17,20,9,16,15,14,14,17,18,15,15,15,15,11,18,20,20,22,19,22,20,20,23,13,13,19,18,17,16,16,20,21,21,28,21,19,26,19,23,16,12,29,18,20,14,18,24,26,28,16,24,21,20,35,18,28,30,16,15,26,23,24,30,25,22,32,21,30,24,27,29,22,32,33,24,35,25,25,23,21,31,28,28,35,27,35,36,40,31,45,67,48,55,58,60,77,59,63,98,106,116,119,135,143,156,173,195,211,245,256,275,297,316,356,350,375,368,421,426,487,489,482,513,492,567,563,572,608,567,558,541,556,496,483,511,504,454,436,441,389,366,361,305,339,308,297,246,241,192,179,166,137,148,124,96,102,79,81,82,62,68,38,54,53,45,49,41,40,45,33,29,41,36,38,32,46,41,32,42,33,47,47,35,39,48,47,53,55,49,53,49,44,46,63,43,36,39,55,43,64,37,39,46,36,35,40,35,24,38,23,31,22,24,33,19,30,11,19,20,14,17,13,18,17,13,19,12,14,15,20,12,10,9,8,11,13,19,9,8,15,9,9,6,8,8,14,8,6,6,10,9,12,7,9,14,15,6,11,6,11,10,11,10,5,20,8,10,9,9,9,6,14,6,9,9,9,15,5,11,5,5,16,5,9,17,11,4,8,8,9,10,7,11,5,8,7,10,11,5,5,5,10,6,10,3,11,6,9,5,10,15,13,9,13,9,10,11,7,8,11,9,11,2,7,12,10,5,7,8,9,7,9,9,9,4,7,6,2,7,11,4,12,10,9,9,16,10,12,12,5,9,8,9,10,8,4,7,7,17,11,8,11,5,9,10,10,4,14,9,
5,16,10,7,11,8,9,10,6,8,7,9,8,7,13,9,10,12,7,5,11,6,5,12,11,12,6,8,7,8,12,10,9,9,12,10,9,5,8,7,12,7,11,5,5,7,10,8,11,9,3,11,11,10,12,6,14,7,9,9,7,7,7,16,7,10,8,8,7,3,5,9,10,9,10,8,15,8,9,7,9,9,7,5,9,4,8,4,10,7,7,11,8,6,6,5,12,8,3,11,5,10,7,14,6,10,8,11,7,9,12,13,14,11,10,8,6,7,11,8,4,5,12,6,8,7,8,13,7,9,7,7,7,7,8,12,8,8,5,12,19,6,6,7,4,17,6,10,7,7,14,5,11,2,10,11,12,8,4,9,10,8,6,12,3,5,4,7,9,7,10,7,9,10,5,12,14,7,7,7,14,11,5,7,11,9,5,8,6,9,10,11,6,8,13,5,9,4,8,12,3,11,4,6,8,7,6,7,11,6,4,2,5,4,6,6,3,9,6,9,9,8,8,12,6,11,4,7,7,8,10,8,8,10,9,6,7,11,5,7,8,9,10,6,7,5,8,6,6,8,14,7,20,14,12,8,5,5,9,12,6,3,4,9,6,8,6,2,8,7,11,4,6,7,8,9,4,8,6,10,6,6,7,4,3,8,8,9,3,5,6,4,8,11,14,10,5,13,12,8,10,7,9,15,6,8,6,6,6,4,6,7,13,3,5,5,5,13,2,3,8,3,12,9,5,6,4,4,6,8,7,8,11,5,6,6,6,8,5,2,6,6,6,7,5,3,12,5,5,4,9,4,7,4,7,6,8,10,10,6,14,5,7,3,8,2,7,5,6,6,6,11,5,3,6,3,9,8,12,9,7,9,6,3,9,4,5,8,5,10,7,6,7,4,10,9,2,7,9,7,8,5,6,5,15,8,9,6,5,4,7,6,8,6,7,5,2,6,5,8,4,4,4,8,3,7,8,5,7,6,8,7,7,4,7,6,9,11,9,5,9,9,12,5,9,9,7,3,7,5,8,4,5,13,11,6,4,7,7,8,6,6,7,8,2,4,6,2,6,5,4,5,3,12,6,6,5,3,6,1,5,12,5,4,3,7,5,10,5,10,4,8,6,9,6,5,7,10,6,5,9,10,4,8,1,11,7,3,7,2,8,4,4,6,6,6,3,6,7,6,5,8,7,9,10,6,3,6,8,3,2,8,7,6,6,5,8,3,4,9,6,6,11,5,5,5,6,4,7,7,7,11,8,11,5,5,3,4,7,6,4,5,7,6,6,11,7,8,8,10,6,7,6,6,6,6,5,10,4,5,4,5,10,2,10,4,3,6,9,7,2,3,1,6,9,11,7,2,4,8,4,4,9,8,5,6,11,7,2,9,4,9,4,5,8,6,1,6,8,2,3,7,5,4,11,7,6,6,8,7,7,6,5,4,7,5,5,7,5,8,3,8,6,8,1,5,8,4,4,0,8,7,5,8,8,5,6,4,5,3,9,5,5,8,9,10,7,5,6,4,4,6,7,2,5,6,14,11,5,7,9,5,3,4,8,8,4,6,4,4,6,6,8,6,10,1,6,10,5,4,6,4,4,5,12,4,2,11,7,3,4,4,6,4,7,7,6,5,7,4,3,9,4,6,8,2,4,4,5,2,6,7,3,8,1,7,5,2,7,5,3,5,2,7,8,3,5,6,2,6,7,4,6,6,9,5,4,4,5,9,5,8,8,5,6,7,7,4,4,1,4,4,3,9,5,11,10,2,4,11,4,9,6,6,4,7,4,6,1,4,5,2,7,4,3,8,4,4,5,5,7,8,4,4,4,11,10,2,2,2,7,2,7,4,4,7,5,4,5,4,4,3,7,5,4,3,8,2,7,4,4,5,4,9,5,9,7,4,4,8,6,1,5,5,2,4,3,2,4,7,6,5,4,7,6,4,5,6,6,5,1,5,1,1,3,3,9,4,5,8,5,7,7,7,4,3,9,3,2,8,7,0,5,3,2,2,5,7,4,5,3,5,3,5,4,7,4,4,6,1,1,1,5,3,5,4,8,5,6,4,5,3,7,4,8,5,4,9,5,4,6,7,6,6,5,5,6,7,4,4,4,4,1,5,10,3,7,2,5,6,6,3,1,2,7,4,4,4,4,10,8,7,1,4,12,8,6,8,9,2,3,8,6,2,4,5,3,5,6,6,7,5,2,4,1,11,7,2,4,5,5,6,6,5,7,6,5,2,7,10,5,5,8,1,3,4,3,3,3,3,5,5,6,5,3,4,0,3,3,4,3,9,1,5,5,6,6,5,8,5,8,3,3,4,6,4,3,5,6,3,6,1,5,4,2,1,1,9,4,7,6,3,4,3,3,4,2,3,3,2,5,4,6,3,4,5,5,2,2,3,6,4,4,7,1,6,5,4,7,4,5,3,4,2,8,0,2,7,3,5,5,5,3,7,9,3,6,6,2,5,0,4,2,4,4,2,3,7,5,5,3,3,4,6,1,6,1,8,6,5,3,3,2,8,0,3,2,3,1,4,4,2,3,6,2,0,1,2,2,5,7,3,4,2,5,4,6,5,2,6,4,5,3,6,3,7,6,2,2,5,6,7,2,5,4,2,3,3,3,0,2,1,7,2,3,7,3,3,5,3,4,7,2,3,3,4,5,6,4,1,2,7,5,6,8,3,7,8,2,4,3,5,4,5,0,3,3,5,3,7,2,4,3,4,3,4,3,6,4,4,3,1,4,5,6,1,5,3,5,2,3,5,4,3,4,3,2,7,4,2,8,5,3,4,1,3,3,2,5,4,5,2,4,1,1,4,1,8,1,4,3,1,1,1,5,3,6,4,5,1,1,4,2,8,1,4,2,3,5,6,5,4,3,3,6,1,3,2,6,3,3,2,5,5,6,6,1,6,4,9,4,3,1,4,2,3,5,4,6,5,8,3,2,1,5,6,1,2,4,7,4,2,2,1,4,3,9,7,2,5,1,4,3,4,5,5,2,4,5,6,6,4,3,8,1,11,7,8,9,7,4,7,12,9,11,11,13,9,5,16,19,17,20,19,14,22,20,28,29,33,28,28,31,35,34,37,31,41,39,36,43,61,66,58,61,50,68,63,66,63,61,71,60,79,74,67,75,80,64,61,93,74,94,76,63,74,81,82,81,64,77,69,88,97,55,80,69,62,67,74,61,51,67,68,62,63,49,45,45,44,51,44,41,37,28,40,24,27,29,30,26,19,15,15,21,24,16,11,13,15,12,12,11,7,8,6,15,4,9,4,4,3,5,4,5,8,3,4,5,4,2,6,5,3,3,4,2,1,4,6,1,3,4,4,2,2,4,3,1,3,2,2,1,5,2,4,1,1,3,3,9,2,2,1,3,5,4,9,0,4,4,5,3,6,4,5,6,3,3,1,5,4,2,3,2,2,3,0,6,0,4,3,3,2,2,5,4,6,3,3,3,0,3,5,4,5,2,1,7,3,1,3,7,2,1,1,3,4,4,3,3,4,5,2,1,2,6,0,5,3,2,6,5,3,4,5,4,4,1,0,3,4,4,4,0,4,4,1,3,2,2,4,3,2,3,2,2,2,1,1,0,3,1,6,4,7,2,4,2,4,2,5,2,3,5,3,6,3,3,6,8,2,7,3,4,2,3,6,5,7,7,4,4,8,7,4,3,7,14,11,11,9,9,8,11,15,7,11,11,13,11,14,12,11,7,11,11,10,1
0,9,9,9,16,12,10,14,17,11,11,10,13,14,12,11,11,11,16,8,13,7,12,10,8,7,6,12,8,7,11,5,6,5,10,8,12,1,6,8,6,5,6,5,8,8,4,6,6,1,4,6,4,1,3,4,5,2,4,4,5,4,5,1,2,6,0,3,3,3,3,1,3,3,2,1,1,3,1,4,1,0,3,4,2,1,4,2,4,4,5,1,4,1,4,1,6,3,4,4,1,4,1,2,0,1,5,3,3,1,3,3,2,1,1,1,0,2,4,3,2,2,2,3,3,5,3,2,2,2,3,1,1,1,2,5,2,3,2,2,2,2,2,2,3,4,2,2,2,3,4,2,3,3,1,2,3,2,4,0,3,4,3,6,0,3,3,5,5,3,3,2,1,5,2,4,1,1,2,2,3,3,2,2,1,1,5,3,1,1,2,5,2,0,0,1,4,6,2,2,1,5,4,1,3,1,1,4,2,3,2,3,5,1,4,2,5,4,3,2,1,2,0,1,0,2,2,5,3,1,3,3,1,1,2,0,3,0,6,2,2,0,2,1,2,3,2,2,3,0,4,1,6,1,1,3,5,0,2,1,0,1,1,1,3,2,2,1,2,2,0,1,2,1,7,2,1,5,3,0,6,3,2,3,2,0,2,0,2,2,6,3,1,2,3,1,2,3,2,0,2,3,3,3,2,1,8,3,2,3,3,1,1,4,2,0,4,0,2,3,3,0,1,2,2,0,0,1,2,4,5,4,2,2,3,4,1,5,6,12,5,3,5,1,5,8,7,8,3,5,8,7,11,7,10,12,11,8,4,10,11,16,11,13,13,12,21,19,17,8,14,17,21,23,23,29,17,22,22,37,25,19,22,37,26,33,40,42,23,26,36,25,30,51,41,30,37,29,32,42,35,40,36,38,29,40,40,34,29,40,31,33,35,48,23,32,31,28,26,44,30,28,25,25,26,26,19,30,29,18,14,25,16,24,22,14,17,16,12,5,10,7,7,14,12,13,9,8,8,8,6,9,4,3,5,10,4,2,4,5,2,3,4,0,4,5,6,3,1,1,4,6,1,7,1,3,1,4,3,3,2,3,1,3,1,3,3,3,2,1,4,1,1,1,3,2,0,7,3,3,3,2,5,1,2,1,0,2,5,6,4,2,1,0,0,1,0,0,1,1,3,0,1,0,1,5,1,1,2,0,0,0,2,1,2,3,2,0,3,3,1,2,0,0,3,7,5,2,1,2,4,2,2,0,0,0,2,1,0,3,0,3,2,1,0,4,0,2,3,2,0,3,3,3,2,2,2,1,1,1,3,2,4,5,3,1,1,1,1,1,1,3,2,1,1,2,1,4,2,4,3,0,2,1,1,0,2,3,3,2,0,0,3,1,2,2,1,1,0,2,3,0,1,0,1,2,2,1,3,0,0,4,1,0,3,2,2,3,2,4,1,3,1,1,3,3,3,1,0,2,1,0,2,5,2,2,1,0,0,1,2,2,1,0,3,0,1,2,0,4,2,1,3,2,0,2,1,5,4,0,2,1,2,1,0,2,3,3,1,0,2,1,0,0,2,3,2,0,3,2,2,5,5,1,4,4,3,4,5,3,4,3,8,6,8,5,3,2,6,3,5,5,7,3,6,4,2,6,3,6,2,2,4,3,4,3,7,7,7,8,5,5,6,8,10,6,5,6,2,5,5,7,5,6,2,8,14,7,9,3,8,2,4,8,5,5,6,2,5,8,4,8,3,6,1,7,2,3,1,1,3,0,2,2,5,7,4,4,6,7,2,3,3,4,0,3,2,3,0,3,6,2,1,0,2,3,1,1,2,1,3,3,3,2,1,1,4,2,1,1,1,3,2,1,1,1,2,1,0,1,0,1,2,4,1,0,0,2,1,3,6,0,2,2,2,1,1,2,1,0,3,2,2,2,2,4,1,1,2,3,2,2,1,0,1,0,1,0,2,1,4,2,0,1,1,1,3,0,3,1,3,1,0,1,1,2,3,1,1,3,0,3,3,1,4,0,1,1,3,2,2,1,0,2,0,0,0,1,0,1,0,2,2,0,1,1,0,4,3,1,0,1,2,2,2,2,2,1,0,2,2,0,1,2,2,1,0,1,2,0,1,1,3,2,2,3,1,1,3,0,1,0,0,1,2,1,0,1,1,2,0,2,1,0,0,2,2,1,2,0,4,4,1,2,1,2,1,0,0,2,0,1,0,1,0,0,1,0,1,0,1,0,0,4,1,0,4,0,3,1,1,1,2,1,0,1,1,2,0,5,3,4,0,0,1,1,2,1,2,1,2,2,0,2,1,0,0,1,0,1,0,0,1,0,1,1,2,0,2,2,2,1,1,1,0,2,2,1,2,1,0,1,2,1,2,1,2,1,2,0,1,0,1,0,1,1,0,1,0,1,4,3,1,1,0,4,1,0,1,1,1,3,0,2,2,0,1,2,3,0,0,1,2,0,2,0,0,1,2,1,3,0,0,4,3,0,1,1,1,0,2,0,1,1,2,1,1,1,0,0,2,3,1,1,1,1,3,1,0,1,2,3,0,0,1,0,1,2,2,3,3,2,1,2,1,1,2,0,0,1,0,2,0,0,0,1,0,1,1,1,1,3,0,3,0,1,0,2,0,1,1,0,3,1,0,0,0,1,2,1,4,1,2,1,1,0 + + + 16 + K-Serie + 4.176372041E-1 + 2.637948976E-1 + 1.898079836E4 + 1.1E2 + 3.743534268E-2 + + + 26 + K-Serie + 2.787831594E-1 + 3.066823675E-1 + 4.510390669E3 + 2.5E1 + 3.490403022E-2 + + + 29 + K-Serie + 3.035796365E-1 + 3.799997213E-1 + 2.290628769E3 + 1.4E1 + 4.096377034E-2 + + + 26 + Fe + Fe-Ka + KA + 6.39862973 + 1.964821903E-1 + 4263 + 4252 + + + 16 + S + S-Ka + KA + 2.3067 + 1.262233472E-1 + 18642 + 18457 + + + 29 + Cu + Cu-Ka + KA + 8.036489146 + 2.183585218E-1 + 2208 + 2207 + + 0 + + + + + 26 + 16744192 + 1 + + + 16 + 16744192 + 1 + + + 29 + 65535 + 1 + + + + + + + + Analysis made on Cameca SXFiveFE with Bruker Nano detector + + + + 2 + 1 + 1 + 1 + + + 0 + 0, + 0,Name + + + 0 + 0, + 0,Value + + + + + + + 2.4825E-2 + 9.962325 + 0 + 5.782313565E1 + 0 + 5.782313565E1 + 0 + 0 + 0 + 1 + Short + Lines + 55,54,50 + 0 + 5 + 5 + 0 + + Calibri + 11 + 150,150,150 + + + Calibri + 16 + 150,150,150 + + 0 + + + bruker_nano.spx + bruker_nano.spx + 0 + 1 + 195,0,0 + 195,0,0 + 1 + Solid + 1 + 0 + 1 + 0 + + + + 1 + Hertz + 0 + None + 1 + 1 + 0 + 1 + 1 + 1 + 0 
+ 0 + 0 + 1 + 6 + 1 + + Verdana + 16 + 8,0,0 + + + + + diff --git a/hyperspy/tests/io/bruker_data/extracted_from_bcf.spx b/hyperspy/tests/io/bruker_data/extracted_from_bcf.spx new file mode 100755 index 0000000000..9b3a6f8150 --- /dev/null +++ b/hyperspy/tests/io/bruker_data/extracted_from_bcf.spx @@ -0,0 +1,191 @@ + + + + + + + + RTHardware + 137 + + 40 + 25 + 3.8E1 + 95 + 33332 + 143400 + 2E4 + 60000 + + + + RTDetector + 5 + 9932 + + SDDpr + 8279_80 + XFlash 6|30 + 0.45 + 0.029 + eJyzcUkt8UmsTC0qtrMB0wYKjiX5ubZKhsZKCiEZmcnZeanFxbZKpq66xkr6UDWGUDXmKEos9ICKjOCKjKCKTFEUmSGbYwxVYoZbiQlUiQWqErhV+gj3AwCpRT07 + slew AP3.3 + + + + + + + + + + + + Internal + 1 + + + Internal + 8E-2 + 1E1 + 1 + 1 + + + Internal + 8E-2 + 5.55E-1 + 1 + + + + 3 + 21 + 5 + -3 + 0 + 1 + False + 1 + 1 + 5.115E-1 + -9.315E-1 + 0.07,0.0058,0.183,0.0078,0.277,0.0058,0.555,0,1.1,0,3.293,0.0064,5.89,0,0,0,0,0, + 0,0.01,0.000801,0.01,0.00298,0.01,0.008902,0.01,0.025,0.010046,0.041098,0.013475,0.04702,0.017302,0.049199,0.019237,0.05,0.02, + 0,0.026417,0.003686,0.026417,0.013709,0.026417,0.04095,0.026417,0.115,0.026585,0.18905,0.039072,0.216292,0.053009,0.226314,0.060056,0.23,0.062835, + 0,0.03,0.005362,0.03,0.019937,0.03,0.059556,0.03,0.16725,0.030229,0.274944,0.047376,0.314563,0.066511,0.329139,0.076186,0.3345,0.08, + 0,0.015,0.007349,0.015,0.027328,0.015,0.081633,0.015,0.22925,0.015114,0.376867,0.023706,0.431172,0.033293,0.451151,0.038139,0.4585,0.04005, + 0,0,0.009626,0,0.035791,0,0.106915,0,0.30025,0,0.493585,0.000035,0.564709,0.000073,0.590874,0.000092,0.6005,0.0001, + 0,0,0.013761,0,0.051168,0,0.15285,0,0.42925,0,0.70565,0.000035,0.807332,0.000073,0.844738,0.000092,0.8585,0.0001, + 0,0,0.018394,0,0.068393,0,0.204305,0,0.57375,0,0.943195,0.000035,1.079108,0.000073,1.129106,0.000092,1.1475,0.0001, + 0,0,0.021968,0,0.081684,0,0.244008,0,0.68525,0,1.126492,0.000035,1.288816,0.000073,1.348531,0.000092,1.3705,0.0001, + 0,0,0.025864,0,0.096167,0,0.287273,0,0.80675,0,1.326227,0.000035,1.517333,0.000073,1.587636,0.000092,1.6135,0.0001, + 0,0,0.029334,0,0.109071,0,0.325818,0,0.915,0,1.504182,0.000035,1.720929,0.000073,1.800667,0.000092,1.83,0.0001, + 0,0,0.03153,0,0.117236,0,0.35021,0,0.9835,0,1.61679,0.000035,1.849764,0.000073,1.935471,0.000092,1.967,0.0001, + 0,0,0.03464,0,0.128798,0,0.38475,0,1.0805,0,1.776249,0.000035,2.032202,0.000073,2.12636,0.000092,2.161,0.0001, + 0,0,0.048088,0,0.178804,0,0.534129,0,1.5,0,2.465871,0.000035,2.821196,0.000073,2.951912,0.000092,3,0.0001, + 0,0,0.069279,0,0.257598,0,0.769501,0,2.161,0,3.552499,0.000035,4.064403,0.000073,4.252721,0.000092,4.322,0.0001, + 0,0,0.099623,0,0.370422,0,1.106536,0,3.1075,0,5.108463,0.000035,5.844577,0.000073,6.115378,0.000092,6.215,0.0001, + 0,0,0.134085,0,0.498566,0,1.489329,0,4.1825,0,6.875671,0.000035,7.866434,0.000073,8.230915,0.000092,8.365,0.0001, + 0,0,0.162313,0,0.603525,0,1.802863,0,5.063,0,8.323137,0.000035,9.522476,0.000073,9.963688,0.000092,10.125999,0.0001, + 0,0,0.192351,0,0.715217,0,2.136515,0,6,0,9.863485,0.000035,11.284782,0.000073,11.807649,0.000092,12,0.0001, + 0,0,0.24044,0,0.894022,0,2.670643,0,7.5,0,12.329357,0,14.105978,0,14.759561,0,15,0, + 0,0,0.320586,0,1.192029,0,3.560857,0,10,0,16.439142,0,18.80797,0,19.679415,0,20,0, + 0,0,1.60293,0,5.960146,0,17.804287,0,50,0,82.195709,0,94.039856,0,98.397072,0,100,0, + + + + RTESMA + 662 + + 1.5E1 + -1 + -1 + 2.000100008E-3 + 3.5E1 + 4.5E1 + 2 + + + + + 82 + 4.3.2018 + + 1548 + -4.7225E-1 + 1E-2 + 3.524514617E-4 + 4.311038316E-4 + + 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,4,8,25,68,118,163,178,129,87,31,13,2,1,0,0,0,0,0,1,1,0,0,0,1,1,2,0,4,2,1,7,8,14,17,8,7,4,8,0,2,0,0,4,5,3,0,3,1,4,3,4,3,7,6,8,10,14,4,10,11,11,11,16,16,15,11,19,21,10,20,19,19,24,20,31,48,45,38,44,41,21,17,12,5,5,3,3,2,13,7,13,10,10,14,8,5,8,1,3,4,5,5,3,6,2,4,4,1,1,0,2,2,0,2,1,2,2,2,1,1,2,4,5,3,3,0,1,2,4,4,3,2,2,6,1,3,4,2,8,1,2,1,3,6,1,7,4,4,5,6,2,3,4,3,7,3,7,6,5,4,7,3,2,5,4,5,5,3,1,4,3,1,6,2,2,2,7,1,3,4,3,7,7,6,9,3,6,1,1,3,6,8,5,5,5,3,3,5,4,5,3,3,6,3,4,5,4,3,4,1,2,1,7,5,4,4,2,3,4,3,3,2,2,6,4,5,2,3,4,1,3,3,4,3,5,5,2,6,12,3,4,3,3,5,3,4,4,7,4,0,1,5,3,3,6,3,2,3,4,3,2,4,2,2,1,1,1,5,5,4,4,5,2,9,3,5,6,3,2,3,5,4,3,5,3,4,2,1,3,6,4,3,4,4,3,1,5,1,2,4,6,4,5,3,2,3,1,7,5,7,1,3,3,6,4,2,2,2,6,2,3,3,6,0,4,2,5,8,5,2,5,2,4,3,2,2,1,2,3,2,2,0,4,0,4,1,0,3,1,1,3,1,3,2,1,3,3,3,2,2,4,3,1,1,2,5,1,3,1,1,2,0,3,3,1,0,4,2,2,2,2,2,2,3,1,5,3,2,1,5,3,1,2,1,1,2,2,2,1,6,1,2,0,3,1,1,2,2,2,2,1,2,2,1,5,3,3,2,2,2,1,1,3,0,1,0,1,2,1,6,1,3,2,0,0,2,4,7,3,2,2,1,3,1,2,2,2,0,1,3,1,1,2,0,3,2,1,2,1,2,4,1,3,1,0,2,3,0,0,2,1,2,0,2,4,1,1,1,0,2,1,2,2,0,3,1,1,1,0,0,2,1,0,3,5,1,2,3,1,2,2,1,1,1,0,1,2,2,2,1,1,3,1,0,0,0,2,2,0,0,1,1,1,1,2,0,0,0,4,0,2,2,3,5,2,3,5,6,5,8,10,8,24,16,22,24,19,17,13,22,26,13,17,11,9,6,5,6,2,1,5,2,1,1,1,0,1,2,2,0,0,1,2,0,3,2,3,3,1,1,1,1,2,2,1,2,0,0,2,4,4,5,2,1,6,6,3,3,3,5,8,5,4,4,6,2,2,1,4,3,2,1,0,0,0,1,3,3,1,0,1,2,1,1,2,0,1,1,0,0,3,1,4,2,10,6,7,14,16,11,30,38,38,43,44,42,46,63,39,44,32,21,18,17,17,11,12,15,7,4,5,3,3,0,1,1,1,1,1,5,3,1,0,1,2,2,0,2,0,0,1,2,1,1,0,1,1,1,0,1,0,0,2,1,2,0,3,1,1,1,2,1,5,3,3,4,6,6,5,9,6,4,3,6,1,5,5,4,4,1,1,5,3,2,1,2,2,0,2,0,1,0,1,0,1,1,0,0,2,0,2,1,1,3,0,3,5,3,2,7,1,7,6,4,4,4,4,4,3,3,0,1,4,0,2,1,3,0,0,0,0,3,1,2,1,1,0,3,0,0,1,0,2,1,1,0,0,1,0,0,0,0,0,1,1,0,0,2,1,1,1,1,1,2,1,0,0,1,1,1,0,3,0,1,0,0,0,1,1,0,0,1,2,0,2,0,0,0,0,3,0,2,0,1,3,1,1,0,0,1,0,0,1,1,0,0,0,1,3,1,1,0,0,0,1,2,0,0,0,1,1,1,0,2,0,0,2,0,0,0,0,1,2,0,1,1,1,1,1,0,0,1,0,0,0,0,0,1,0,0,0,0,1,0,1,0,1,0,0,0,0,2,1,2,0,0,0,1,0,1,0,1,1,2,2,1,0,1,0,0,0,1,0,1,0,1,0,0,0,2,0,0,1,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,2,2,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2,1,0,1,1,1,0,2,2,2,0,0,0,0,0,0,0,0,1,2,1,1,0,2,1,1,1,0,0,1,0,1,0,1,1,1,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,1,0,0,1,1,0,0,1,2,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,1,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,2,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 + + + + -0.48665783 + 0.81256217 + 0 + 39.2928022966004 + 0 + 0 + 0 + 0 + Short + Lines + 223,223,223 + 5 + 5 + 0 + + Verdana + 11 + 8,0,0 +