Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/RELEASE_next_patch' into RELEA…
Browse files Browse the repository at this point in the history
…SE_next_minor
  • Loading branch information
ericpre committed Jun 18, 2022
2 parents 5f15a54 + af3ec43 commit c868840
Show file tree
Hide file tree
Showing 51 changed files with 745 additions and 1,072 deletions.
2 changes: 2 additions & 0 deletions doc/user_guide/big_data.rst
Original file line number Diff line number Diff line change
Expand Up @@ -327,6 +327,8 @@ Practical tips
Despite the limitations detailed below, most HyperSpy operations can be
performed lazily. Important points are:
.. _big_data.chunking:
Chunking
^^^^^^^^
Expand Down
2 changes: 1 addition & 1 deletion doc/user_guide/interactive_operations_ROIs.rst
Original file line number Diff line number Diff line change
Expand Up @@ -258,7 +258,7 @@ ROIs can be used in place of slices when indexing. For example:

In addition, all ROIs have a py:meth:`__getitem__` method that enables
using them in place of tuples.
For example, the method :py:meth:`~._signals.signal2d.align2D` takes a ``roi``
For example, the method :py:meth:`~._signals.Signal2D.align2D` takes a ``roi``
argument with the left, right, top, bottom coordinates of the ROI.
Handily, we can pass a :py:class:`~.roi.RectangularROI` ROI instead.

Expand Down
6 changes: 3 additions & 3 deletions doc/user_guide/io.rst
Original file line number Diff line number Diff line change
Expand Up @@ -379,7 +379,7 @@ This feature is particularly useful when using

The hyperspy HDF5 format supports chunking the data into smaller pieces to make it possible to load only part
of a dataset at a time. By default, the data is saved in chunks that are optimised to contain at least one
full signal shape for non-lazy signal, while for lazy signal, the chunking of the dask is used. It is possible to
full signal. It is possible to
customise the chunk shape using the ``chunks`` keyword.
For example, to save the data with ``(20, 20, 256)`` chunks instead of the default ``(7, 7, 2048)`` chunks
for this signal:
Expand All @@ -398,8 +398,8 @@ what, for large signal spaces usually leads to smaller chunks as ``guess_chunk``
constrain of storing at least one signal per chunks. For example, for the signal in the example above
passing ``chunks=True`` results in ``(7, 7, 256)`` chunks.

Choosing the correct chunk-size can significantly affect the speed of reading, writing and performance of many hyperspy algorithms.
See the `chunking section <big_data.html#Chunking>`__ under `Working with big data <big_data.html>`__ for more information.
Choosing the correct chunk-size can significantly affect the speed of reading, writing and performance of many HyperSpy algorithms.
See the :ref:`chunking section <big_data.chunking>` for more information.

Extra saving arguments
^^^^^^^^^^^^^^^^^^^^^^
Expand Down
2 changes: 1 addition & 1 deletion hyperspy/Release.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
# When running setup.py the ".dev" string will be replaced (if possible)
# by the output of "git describe" if git is available or the git
# hash if .git is present.
version = "1.8.0.dev0"
version = "1.7.1.dev0"
__version__ = version
description = "Multidimensional data analysis toolbox"
license = 'GPL v3'
Expand Down
2 changes: 2 additions & 0 deletions hyperspy/_signals/dielectric_function.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@

class DielectricFunction(ComplexSignal1D):

"""Complex signal class for dielectric functions."""

_signal_type = "DielectricFunction"
_alias_signal_types = ["dielectric function"]

Expand Down
2 changes: 2 additions & 0 deletions hyperspy/_signals/eds.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@

class EDSSpectrum(Signal1D):

"""General 1D signal class for EDS spectra."""

_signal_type = "EDS"

def __init__(self, *args, **kwards):
Expand Down
2 changes: 2 additions & 0 deletions hyperspy/_signals/eds_sem.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,8 @@ class EDSSEMParametersUI(BaseSetMetadataItems):

class EDSSEMSpectrum(EDSSpectrum):

"""1D signal class for EDS spectra measured in an SEM."""

_signal_type = "EDS_SEM"

def __init__(self, *args, **kwards):
Expand Down
2 changes: 2 additions & 0 deletions hyperspy/_signals/eds_tem.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,8 @@ class EDSTEMParametersUI(BaseSetMetadataItems):

class EDSTEMSpectrum(EDSSpectrum):

"""1D signal class for EDS spectra measured in a TEM."""

_signal_type = "EDS_TEM"

def __init__(self, *args, **kwards):
Expand Down
2 changes: 2 additions & 0 deletions hyperspy/_signals/eels.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,8 @@ class EELSTEMParametersUI(BaseSetMetadataItems):

class EELSSpectrum(Signal1D):

"""1D signal class for EELS spectra."""

_signal_type = "EELS"
_alias_signal_types = ["TEM EELS"]

Expand Down
5 changes: 3 additions & 2 deletions hyperspy/_signals/signal1d.py
Original file line number Diff line number Diff line change
Expand Up @@ -260,8 +260,9 @@ def _shift1D(data, **kwargs):


class Signal1D(BaseSignal, CommonSignal1D):
"""
"""

"""General 1D signal class."""

_signal_dimension = 1

def __init__(self, *args, **kwargs):
Expand Down
16 changes: 9 additions & 7 deletions hyperspy/_signals/signal2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ def estimate_image_shift(ref, image, roi=None, sobel=True,
Returns
-------
shifts: np.array
containing the estimate shifts
containing the estimated shifts in pixels
max_value : float
The maximum value of the correlation
Expand Down Expand Up @@ -310,8 +310,8 @@ def estimate_image_shift(ref, image, roi=None, sobel=True,

class Signal2D(BaseSignal, CommonSignal2D):

"""
"""
"""General 2D signal class."""

_signal_dimension = 2

def __init__(self, *args, **kwargs):
Expand Down Expand Up @@ -463,8 +463,8 @@ def estimate_shift2D(self,
Returns
-------
shifts : list of array
List of estimated shifts
shifts : array
Estimated shifts in pixels.
Notes
-----
Expand Down Expand Up @@ -625,8 +625,10 @@ def align2D(
fill_value : int, float, nan
The areas with missing data are filled with the given value.
Default is nan.
shifts : None or list of tuples
If None the shifts are estimated using
shifts : None or array.
The array of shifts must be in pixel units. The shape must be
the navigation shape using numpy order convention. If `None`
the shifts are estimated using
:py:meth:`~._signals.signal2D.estimate_shift2D`.
expand : bool
If True, the data will be expanded to fit all data after alignment.
Expand Down
7 changes: 5 additions & 2 deletions hyperspy/axes.py
Original file line number Diff line number Diff line change
Expand Up @@ -1540,8 +1540,11 @@ def __getitem__(self, y):
return ans

def _axes_getter(self, y):
if y in self._axes:
return y
if isinstance(y, BaseDataAxis):
if y in self._axes:
return y
else:
raise ValueError(f"{y} is not in {self}")
if isinstance(y, str):
axes = list(self._get_axes_in_natural_order())
while axes:
Expand Down
1 change: 1 addition & 0 deletions hyperspy/drawing/_widgets/range.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,7 @@ def _span_changed(self, *args, **kwargs):
extents = self.span.extents
self._pos = np.array([extents[0]])
self._size = np.array([extents[1] - extents[0]])
self.events.changed.trigger(self)

def _get_range(self):
p = self._pos[0]
Expand Down
47 changes: 31 additions & 16 deletions hyperspy/drawing/signal1d.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,13 +177,20 @@ def plot(self, data_function_kwargs={}, **kwargs):
self.ax.set_title(self.title)
x_axis_upper_lims = []
x_axis_lower_lims = []

for line in self.ax_lines:
line.plot(data_function_kwargs=data_function_kwargs, **kwargs)
x_axis_lower_lims.append(line.axis.axis[0])
x_axis_upper_lims.append(line.axis.axis[-1])
if len(line.axis.axis) > 1:
x_axis_lower_lims.append(line.axis.axis[0])
x_axis_upper_lims.append(line.axis.axis[-1])

for marker in self.ax_markers:
marker.plot(render_figure=False)
plt.xlim(np.min(x_axis_lower_lims), np.max(x_axis_upper_lims))

plt.xlim(min(x_axis_lower_lims, default=None),
max(x_axis_upper_lims, default=None)
)

self.axes_manager.events.indices_changed.connect(self.update, [])
self.events.closed.connect(
lambda: self.axes_manager.events.indices_changed.disconnect(
Expand Down Expand Up @@ -484,28 +491,36 @@ def update(self, force_replot=False, render_figure=True,
else:
self.line.set_ydata(ydata)

if 'x' in self.autoscale:
self.ax.set_xlim(axis[0], axis[-1])
# Don't change xlim if axis has 0 length (unnecessary)
if 'x' in self.autoscale and len(axis) > 0:
x_min, x_max = axis[0], axis[-1]
if x_min == x_max:
# To avoid matplotlib UserWarning when calling `set_ylim`
x_min, x_max = (x_min - 0.1, x_min + 0.1)
self.ax.set_xlim(x_min, x_max)

if 'v' in self.autoscale:
# Don't change ymin if data has 0 length (unnecessary)
if 'v' in self.autoscale and len(ydata) > 0:
self.ax.relim()
y1, y2 = np.searchsorted(axis, self.ax.get_xbound())
y2 += 2
y1, y2 = np.clip((y1, y2), 0, len(ydata - 1))
clipped_ydata = ydata[y1:y2]
# Based on the current zoom of the x axis, find the corresponding
# y range of data and calculate the y_min, y_max accordingly
i1, i2 = np.searchsorted(axis, self.ax.get_xbound())
# Make interval wider on both side and clip to allowed range
i1, i2 = np.clip((i1-1, i2+1), 0, len(ydata - 1))
ydata = ydata[i1:i2]

with ignore_warning(category=RuntimeWarning):
# In case of "All-NaN slices"
y_max, y_min = (np.nanmax(clipped_ydata),
np.nanmin(clipped_ydata))
y_max, y_min = np.nanmax(ydata), np.nanmin(ydata)

if self._plot_imag:
# Add real plot
yreal = self._get_data(real_part=True)
clipped_yreal = yreal[y1:y2]
yreal = self._get_data(real_part=True)[i1:i2]
with ignore_warning(category=RuntimeWarning):
# In case of "All-NaN slices"
y_min = min(y_min, np.nanmin(clipped_yreal))
y_max = max(y_max, np.nanmin(clipped_yreal))
y_min = min(y_min, np.nanmin(yreal))
y_max = max(y_max, np.nanmin(yreal))

if y_min == y_max:
# To avoid matplotlib UserWarning when calling `set_ylim`
y_min, y_max = y_min - 0.1, y_max + 0.1
Expand Down
12 changes: 11 additions & 1 deletion hyperspy/interactive.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,8 +127,18 @@ def update(self):


def interactive(f, event="auto", recompute_out_event="auto", *args, **kwargs):
    """
    %s
    Returns
    -------
    :py:class:`~hyperspy.signal.BaseSignal` or one of its subclass
        Signal updated with the operation result when a given event is
        triggered.
    """
    # NOTE: the "%s" placeholder in the docstring above is substituted at
    # import time via ``interactive.__doc__ %= Interactive.__init__.__doc__``,
    # so the docstring must remain %-format compatible (no bare '%').
    # Delegate all arguments to the Interactive wrapper, which wires ``f``
    # to the given events, and return its auto-updating output signal.
    cls = Interactive(f, event, recompute_out_event, *args, **kwargs)
    return cls.out


interactive.__doc__ = Interactive.__init__.__doc__
interactive.__doc__ %= Interactive.__init__.__doc__
28 changes: 20 additions & 8 deletions hyperspy/io_plugins/_hierarchical.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ def get_signal_chunks(shape, dtype, signal_axes=None, target_size=1e6):

# largely based on the guess_chunk in h5py
bytes_per_signal = multiply([shape[i] for i in signal_axes]) * typesize
signals_per_chunk = np.floor_divide(target_size, bytes_per_signal)
signals_per_chunk = int(np.floor_divide(target_size, bytes_per_signal))
navigation_axes = tuple(i for i in range(len(shape)) if i not in
signal_axes)
num_nav_axes = len(navigation_axes)
Expand All @@ -60,13 +60,25 @@ def get_signal_chunks(shape, dtype, signal_axes=None, target_size=1e6):
return shape
else:
# signal is smaller than chunk max
sig_axes_chunk = np.floor(signals_per_chunk**(1/num_nav_axes))
remainder = np.floor_divide(signals_per_chunk - (sig_axes_chunk**num_nav_axes),
sig_axes_chunk)
if remainder<0:
remainder =0
chunks = [s if i in signal_axes else sig_axes_chunk for i, s in enumerate(shape)]
chunks[navigation_axes[0]] = chunks[navigation_axes[0]]+remainder
# Index of axes with size smaller than required to make all chunks equal
small_idx = []
# Sizes of axes with size smaller than required to make all chunks equal
small_sizes = []
iterate = True
while iterate:
iterate = False
# Calculate the size of the chunks of the axes not in `small_idx`
# The process is iterative because `nav_axes_chunks` can be bigger
# than some axes sizes. If that is the case, the value must be
# recomputed at the next iteration after having added the "offending"
# axes to `small_idx`
nav_axes_chunks = int(np.floor((signals_per_chunk / np.prod(small_sizes))**(1 / (num_nav_axes - len(small_sizes)))))
for index, size in enumerate(shape):
if index not in (list(signal_axes) + small_idx) and size < nav_axes_chunks:
small_idx.append(index)
small_sizes.append(size)
iterate = True
chunks = [s if i in signal_axes or i in small_idx else nav_axes_chunks for i, s in enumerate(shape)]
return tuple(int(x) for x in chunks)


Expand Down
34 changes: 29 additions & 5 deletions hyperspy/io_plugins/tiff.py
Original file line number Diff line number Diff line change
Expand Up @@ -348,9 +348,19 @@ def _axes_fei(tiff, op, shape, names):
del op['FEI_HELIOS']
except KeyError:
del op['FEI_SFEG']
scales['x'] = float(op['fei_metadata']['Scan']['PixelWidth'])
scales['y'] = float(op['fei_metadata']['Scan']['PixelHeight'])
units.update({'x': 'm', 'y': 'm'})
try:
scales['x'] = float(op['fei_metadata']['Scan']['PixelWidth'])
scales['y'] = float(op['fei_metadata']['Scan']['PixelHeight'])
units.update({'x': 'm', 'y': 'm'})
except KeyError:
_logger.debug("No 'Scan' information found in FEI metadata; attempting to get pixel size "
"from 'IRBeam' metadata")
try:
scales['x'] = float(op['fei_metadata']['IRBeam']['HFW']) / float(op['fei_metadata']['Image']['ResolutionX'])
scales['y'] = float(op['fei_metadata']['IRBeam']['VFW']) / float(op['fei_metadata']['Image']['ResolutionY'])
units.update({'x': 'm', 'y': 'm'})
except KeyError:
_logger.warning("Could not determine pixel size; resulting Signal will not be calibrated")

scales, offsets, units = _order_axes_by_name(names, scales, offsets, units)

Expand Down Expand Up @@ -864,6 +874,20 @@ def _parse_beam_current_FEI(value):
return None


def _parse_beam_energy_FEI(value):
try:
return float(value) * 1e-3
except ValueError:
return None


def _parse_working_distance_FEI(value):
try:
return float(value) * 1e3
except ValueError:
return None


def _parse_tuple_Zeiss(tup):
value = tup[1]
try:
Expand Down Expand Up @@ -899,7 +923,7 @@ def _parse_string(value):

mapping_fei = {
'fei_metadata.Beam.HV':
("Acquisition_instrument.SEM.beam_energy", lambda x: float(x) * 1e-3),
("Acquisition_instrument.SEM.beam_energy", _parse_beam_energy_FEI),
'fei_metadata.Stage.StageX':
("Acquisition_instrument.SEM.Stage.x", None),
'fei_metadata.Stage.StageY':
Expand All @@ -911,7 +935,7 @@ def _parse_string(value):
'fei_metadata.Stage.StageT':
("Acquisition_instrument.SEM.Stage.tilt", None),
'fei_metadata.Stage.WorkingDistance':
("Acquisition_instrument.SEM.working_distance", lambda x: float(x) * 1e3),
("Acquisition_instrument.SEM.working_distance", _parse_working_distance_FEI),
'fei_metadata.Scan.Dwelltime':
("Acquisition_instrument.SEM.dwell_time", None),
'fei_metadata.EBeam.BeamCurrent':
Expand Down

0 comments on commit c868840

Please sign in to comment.