diff --git a/py4DSTEM/braggvectors/probe.py b/py4DSTEM/braggvectors/probe.py
index d97d89c48..e51e4450d 100644
--- a/py4DSTEM/braggvectors/probe.py
+++ b/py4DSTEM/braggvectors/probe.py
@@ -307,7 +307,9 @@ def zero_vacuum(
         alpha_max=1.2,
     ):
         """
-        Sets pixels outside of the probe's central disk to zero.
+        Sets pixels outside of the probe's central disk to zero using a
+        hard mask at alpha*alpha_max. To use a continuous mask, try
+        zero_vacuum_sinusoid.
 
         The probe origin and convergence semiangle must be set for this
         method to run - these can be set using `measure_disk`. Pixels are
@@ -335,6 +337,57 @@ def zero_vacuum(
         self.probe *= mask
         pass
 
+    def zero_vacuum_sinusoid(self, s=0.1, w=1, p=1, update_inplace=True):
+        """
+        Sets pixels outside of the probe's central disk to zero using a
+        continuous mask with sinusoidal^p decay over the range
+        [alpha*(1+s), alpha*(1+s+w)] for semiangle alpha.
+
+        The probe origin and convergence semiangle must be set for this
+        method to run - these can be set using `measure_disk`. Pixels are
+        damped smoothly over the window from radius alpha*(1+s) to
+        alpha*(1+s+w) from the origin, and are zeroed beyond it.
+
+        Parameters
+        ----------
+        s : number
+            Damping begins at radius alpha*(1+s)
+        w : number
+            Width of the damping window, i.e. damping ends at alpha*(1+s+w)
+        p : number
+            Scaling power for the damping sinusoid
+        update_inplace : bool
+            Toggle updating the self.probe value or returning it only
+        """
+        # validate inputs
+        assert (
+            self.alpha is not None
+        ), "no probe semiconvergence angle found; try running `Probe.measure_disk`"
+        assert (
+            self.origin is not None
+        ), "no probe origin found; try running `Probe.measure_disk`"
+        # prepare vars
+        qx0, qy0 = self.origin
+        alpha = self.alpha
+        probe = self.probe
+        Q_Nx, Q_Ny = probe.shape
+        # setup mesh
+        qyy, qxx = np.meshgrid(np.arange(Q_Ny), np.arange(Q_Nx))
+        qrr = np.hypot(qxx - qx0, qyy - qy0)
+        # find damping curve
+        r = alpha * (1 + s)
+        _w = alpha * w
+        f = 1 + (r - qrr) / _w
+        f = np.minimum(np.maximum(f, 0), 1)
+        f = np.sin(f) ** p
+        # scale a copy, so self.probe only changes if update_inplace
+        probe = probe * f
+        # update
+        if update_inplace:
+            self.probe = probe
+        # return scaled probe
+        return probe
+
     # Kernel generation methods
 
     def get_kernel(
@@ -400,15 +453,9 @@ def get_kernel(
         # check for the origin
         if origin is None:
             try:
-                x = self.calibration.get_probe_params()
+                origin = self.origin
             except AttributeError:
-                x = None
-            finally:
-                if x is None:
-                    origin = None
-                else:
-                    r, x, y = x
-                    origin = (x, y)
+                origin = None
 
         # get the data
         probe = data if data is not None else self.probe
@@ -592,7 +639,7 @@ def get_probe_kernel_edge_sigmoid(
         np.mod(np.arange(Q_Ny) + Q_Ny // 2, Q_Ny) - Q_Ny // 2,
         np.mod(np.arange(Q_Nx) + Q_Nx // 2, Q_Nx) - Q_Nx // 2,
     )
-    qr = np.sqrt(qx**2 + qy**2)
+    qr = np.hypot(qx, qy)
 
     # Calculate sigmoid
     if type == "logistic":
         r0 = 0.5 * (ro + ri)
diff --git a/py4DSTEM/datacube/datacube.py b/py4DSTEM/datacube/datacube.py
index 93a48bbdd..e8d96715d 100644
--- a/py4DSTEM/datacube/datacube.py
+++ b/py4DSTEM/datacube/datacube.py
@@ -11,11 +11,13 @@
     gaussian_filter,
 )
 from typing import Optional, Union
+import matplotlib.pyplot as plt
 
 from emdfile import Array, Metadata, Node, Root, tqdmnd
 from py4DSTEM.data import Data, Calibration
 from py4DSTEM.datacube.virtualimage import DataCubeVirtualImager
 from py4DSTEM.datacube.virtualdiffraction import DataCubeVirtualDiffraction
+from py4DSTEM.visualize import show
 
 
 class DataCube(
@@ -515,11 +517,9 @@ def get_vacuum_probe(
         self,
         ROI=None,
         align=True,
+        zero_vacuum=True,
         mask=None,
-        threshold=0.0,
-        expansion=12,
-        opening=3,
-        verbose=False,
+        plot=True,
         returncalc=True,
     ):
         """
@@ -541,20 +541,16 @@ def get_vacuum_probe(
             (rx_min,rx_max,ry_min,ry_max)
         align : optional, bool
             if True, aligns the probes before averaging
+        zero_vacuum : bool
+            if True, damps pixels outside the central probe to zero using a
+            sinusoidal^p decay over the range [alpha*(1+s), alpha*(1+s+w)];
+            this calls Probe.zero_vacuum_sinusoid with its default values of
+            s, w and p
         mask : optional, array
             mask applied to each diffraction pattern before alignment and
             averaging
-        threshold : float
-            in the final masking step, values less than max(probe)*threshold
-            are considered outside the probe
-        expansion : int
-            number of pixels by which the final mask is expanded after
-            thresholding
-        opening : int
-            size of binary opening applied to the final mask to eliminate stray
-            bright pixels
-        verbose : bool
-            toggles verbose output
+        plot : bool
+            if True, displays the final probe
         returncalc : bool
             if True, returns the answer
 
@@ -594,23 +590,78 @@
                 curr_DP = get_shifted_ar(curr_DP, xshift, yshift)
             probe = probe * (n - 1) / n + curr_DP / n
 
-        # mask
-        mask = probe > np.max(probe) * threshold
-        mask = binary_opening(mask, iterations=opening)
-        mask = binary_dilation(mask, iterations=1)
-        mask = (
-            np.cos(
-                (np.pi / 2)
-                * np.minimum(
-                    distance_transform_edt(np.logical_not(mask)) / expansion, 1
-                )
+        # make a probe
+        probe = Probe(probe)
+
+        # measure probe size
+        probe.measure_disk(zero_vacuum=False, plot=False)
+
+        # zero vacuum
+        _p = probe.probe
+        if zero_vacuum:
+            probe.zero_vacuum_sinusoid()
+
+        # show
+        if plot:
+            fig, axs = plt.subplots(2, 3, figsize=(12, 8))
+            show(
+                probe.probe,
+                scaling="log",
+                circle={
+                    "center": probe.origin,
+                    "R": probe.alpha,
+                    "alpha": 0.2,
+                    "fill": True,
+                },
+                figax=(fig, axs[0, 0]),
             )
-            ** 2
-        )
-        probe *= mask
+            show(
+                probe.probe,
+                scaling="none",
+                intensity_range="minmax",
+                circle={
+                    "center": probe.origin,
+                    "R": probe.alpha,
+                    "alpha": 0.2,
+                    "fill": True,
+                },
+                figax=(fig, axs[0, 1]),
+            )
+            show(
+                probe.probe,
+                scaling="none",
+                intensity_range="absolute",
+                vmin=0,
+                vmax=np.max(probe.probe) * 0.1,
+                circle={
+                    "center": probe.origin,
+                    "R": probe.alpha,
+                    "alpha": 0.2,
+                    "fill": True,
+                },
+                figax=(fig, axs[0, 2]),
+            )
+            show(probe.probe, scaling="log", figax=(fig, axs[1, 0]))
+            show(
+                probe.probe,
+                scaling="none",
+                intensity_range="minmax",
+                figax=(fig, axs[1, 1]),
+            )
+            show(
+                probe.probe,
+                scaling="none",
+                intensity_range="absolute",
+                vmin=0,
+                vmax=np.max(probe.probe) * 0.1,
+                figax=(fig, axs[1, 2]),
+            )
+            axs[0, 0].set_title("log")
+            axs[0, 1].set_title("linear | min/max")
+            axs[0, 2].set_title("linear | min/0.1*max")
+            plt.show()
 
-        # make a probe, add to tree, and return
-        probe = Probe(probe)
+        # add probe to tree and return
         self.attach(probe)
         if returncalc:
             return probe
diff --git a/py4DSTEM/process/diffraction/digital_dark_field.py b/py4DSTEM/process/diffraction/digital_dark_field.py
index 54646a0c2..cbf2778f4 100644
--- a/py4DSTEM/process/diffraction/digital_dark_field.py
+++ b/py4DSTEM/process/diffraction/digital_dark_field.py
@@ -1,6 +1,6 @@
 """
 A general note on all these functions is that they are designed for use with rotation calibration into the pointslist.
-However, they have to date only been used with the Qx and Qy in pixels and not calibrated into reciprocal units. 
+However, they have to date only been used with the Qx and Qy in pixels and not calibrated into reciprocal units.
 There is no reason why this should not work, but the default tolerance would need adjustment.
 """
 
diff --git a/py4DSTEM/process/polar/polar_analysis.py b/py4DSTEM/process/polar/polar_analysis.py
index ece76d956..67edff2cf 100644
--- a/py4DSTEM/process/polar/polar_analysis.py
+++ b/py4DSTEM/process/polar/polar_analysis.py
@@ -28,15 +28,15 @@ def calculate_radial_statistics(
     compute the mean and standard deviation pattern by pattern, i.e. for
     diffraction signal d(x,y; q,theta) we take
 
-        d_mean_all(x,y; q) = \int_{0}^{2\pi} d(x,y; q,\theta) d\theta
-        d_var_all(x,y; q) = \int_{0}^{2\pi}
-            \( d(x,y; q,\theta) - d_mean_all(x,y; q,\theta) \)^2 d\theta
+        d_mean_all(x,y; q) = \\int_{0}^{2\\pi} d(x,y; q,\\theta) d\\theta
+        d_var_all(x,y; q) = \\int_{0}^{2\\pi}
+            \\( d(x,y; q,\\theta) - d_mean_all(x,y; q) \\)^2 d\\theta
 
     Then we find the mean and variance profiles by taking the means of these
     quantities over all scan positions:
 
-        d_mean(q) = \sum_{x,y} d_mean_all(x,y; q)
-        d_var(q) = \sum_{x,y} d_var_all(x,y; q)
+        d_mean(q) = \\sum_{x,y} d_mean_all(x,y; q)
+        d_var(q) = \\sum_{x,y} d_var_all(x,y; q)
 
     and the normalized variance is d_var/d_mean.
 
@@ -264,13 +264,13 @@
     filters are optionally applied. The structure factor is then inverted
     into the reduced pair distribution function g(r) using
 
-        g(r) = \frac{2}{\pi) \int sin( 2\pi r k ) S(k) dk
+        g(r) = \\frac{2}{\\pi} \\int sin( 2\\pi r k ) S(k) dk
 
     The value of the integral is (optionally) damped to zero at the origin to
     match the physical requirement that this condition holds. Finally, the
     full PDF G(r) is computed if a known density is provided, using
 
-        G(r) = 1 + [ \frac{2}{\pi} * g(r) / ( 4\pi * D * r dr ) ]
+        G(r) = 1 + [ \\frac{2}{\\pi} * g(r) / ( 4\\pi * D * r dr ) ]
 
     This follows the methods described in [@cophus TODO ADD CITATION].
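
Usage note (not part of the patch): a minimal sketch of how the changed pieces fit together, assuming a 4D-STEM scan containing a vacuum region. The file name and ROI values are placeholders, and the `mode`/`radii` arguments to `get_kernel` refer to the existing kernel options rather than anything introduced in this diff.

```python
import py4DSTEM

# load a 4D-STEM scan that contains a vacuum region (placeholder path)
datacube = py4DSTEM.import_file("vacuum_scan.dm4")

# average the vacuum patterns into a Probe; with this diff applied, the
# method also measures the disk, damps the surrounding vacuum with the
# sinusoidal mask (zero_vacuum=True), and plots the result (plot=True)
probe = datacube.get_vacuum_probe(ROI=(0, 10, 0, 10))

# the damping can be re-applied directly with non-default parameters;
# it begins at alpha*(1+s) and reaches zero at alpha*(1+s+w)
probe.zero_vacuum_sinusoid(s=0.2, w=0.5, p=2)

# get_kernel now falls back to probe.origin when no origin is passed;
# the mode and radii below are assumed existing options, not new ones
kernel = probe.get_kernel(mode="sigmoid", radii=(probe.alpha, 2 * probe.alpha))
```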