From 0703e10be79412d211587f5ce1cc951876403024 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 28 Nov 2024 12:24:06 +0100 Subject: [PATCH 01/37] experiment simplifying the amor workflow --- src/ess/amor/__init__.py | 8 +- src/ess/amor/conversions.py | 104 +++++++++-- src/ess/amor/figures.py | 124 +++++++------ src/ess/amor/geometry.py | 75 ++++---- src/ess/amor/load.py | 113 ++++-------- src/ess/amor/normalization.py | 120 +++++++++++++ src/ess/amor/orso.py | 4 +- src/ess/amor/resolution.py | 148 +++++++++------- src/ess/amor/types.py | 12 +- src/ess/amor/utils.py | 26 +-- src/ess/reflectometry/__init__.py | 10 +- src/ess/reflectometry/calibrations.py | 45 ----- src/ess/reflectometry/conversions.py | 164 +----------------- src/ess/reflectometry/corrections.py | 81 --------- src/ess/reflectometry/load.py | 6 +- src/ess/reflectometry/normalize.py | 149 ---------------- src/ess/reflectometry/orso.py | 9 +- src/ess/reflectometry/supermirror/__init__.py | 32 ++++ src/ess/reflectometry/tools.py | 47 +---- src/ess/reflectometry/types.py | 42 +---- src/ess/reflectometry/workflow.py | 8 +- 21 files changed, 529 insertions(+), 798 deletions(-) create mode 100644 src/ess/amor/normalization.py delete mode 100644 src/ess/reflectometry/calibrations.py delete mode 100644 src/ess/reflectometry/corrections.py delete mode 100644 src/ess/reflectometry/normalize.py diff --git a/src/ess/amor/__init__.py b/src/ess/amor/__init__.py index 0290e6ba..e800d889 100644 --- a/src/ess/amor/__init__.py +++ b/src/ess/amor/__init__.py @@ -5,23 +5,21 @@ import sciline import scipp as sc + from ..reflectometry import providers as reflectometry_providers from ..reflectometry import supermirror from ..reflectometry.types import ( BeamSize, DetectorSpatialResolution, - Gravity, NeXusDetectorName, RunType, SamplePosition, BeamDivergenceLimits, ) -from . import conversions, load, orso, resolution, utils, figures +from . 
import conversions, load, orso, resolution, utils, figures, normalization from .instrument_view import instrument_view from .types import ( AngularResolution, - Chopper1Position, - Chopper2Position, ChopperFrequency, ChopperPhase, QThetaFigure, @@ -42,6 +40,7 @@ *reflectometry_providers, *load.providers, *conversions.providers, + *normalization.providers, *resolution.providers, *utils.providers, *figures.providers, @@ -61,7 +60,6 @@ def default_parameters() -> dict: supermirror.Alpha: sc.scalar(0.25 / 0.088, unit=sc.units.angstrom), BeamSize[RunType]: 2.0 * sc.units.mm, DetectorSpatialResolution[RunType]: 0.0025 * sc.units.m, - Gravity: sc.vector(value=[0, -1, 0]) * sc.constants.g, SamplePosition[RunType]: sc.vector([0, 0, 0], unit="m"), NeXusDetectorName[RunType]: "detector", ChopperPhase[RunType]: sc.scalar(7.0, unit="deg"), diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 6d8c99f9..706ed647 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -1,24 +1,96 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright (c) 2023 Scipp contributors (https://github.com/scipp) +import numpy as np import scipp as sc -from ..reflectometry.types import IncidentBeam, RunType, SamplePosition -from .types import Chopper1Position, Chopper2Position +from ..reflectometry.types import ( + BeamDivergenceLimits, + RawDetectorData, + ReducibleData, + RunType, + YIndexLimits, +) -def incident_beam( - source_chopper_1_position: Chopper1Position[RunType], - source_chopper_2_position: Chopper2Position[RunType], - sample_position: SamplePosition[RunType], -) -> IncidentBeam[RunType]: - """ - Compute the incident beam vector from the source chopper position vector, - instead of the source_position vector. - """ - chopper_midpoint = ( - source_chopper_1_position + source_chopper_2_position - ) * sc.scalar(0.5) - return sample_position - chopper_midpoint +def angle_of_reflection( + wavelength, divergence_angle, L2, sample_rotation, detector_rotation +): + c = sc.constants.g * sc.constants.m_n**2 / sc.constants.h**2 + out = (c * L2 * wavelength**2).to(unit='dimensionless') + sc.sin( + divergence_angle + detector_rotation + ) + out = sc.asin(out, out=out) + out -= sample_rotation + return out -providers = (incident_beam,) +def angle_of_divergence(angle_of_reflection, sample_rotation): + return angle_of_reflection - sample_rotation + + +def wavelength( + event_time_offset, divergence_angle, L1, L2, chopper_phase, chopper_frequency +): + out = event_time_offset.to(unit="ns", dtype="float64", copy=True) + unit = out.bins.unit + tau = (1 / (2 * chopper_frequency)).to(unit=unit) + tof_offset = tau * chopper_phase / (180.0 * sc.units.deg) + + minimum = -tof_offset + frame_bound = tau - tof_offset + maximum = 2 * tau - tof_offset + + out += sc.where( + (minimum < event_time_offset) & (event_time_offset < frame_bound), + tof_offset, + sc.where( + (frame_bound < event_time_offset) & (event_time_offset < maximum), + tof_offset - tau, + sc.scalar(np.nan, unit=unit), + ), + ) + out -= (divergence_angle.to(unit="deg") / (180.0 * sc.units.deg)) * tau + out *= (sc.constants.h / sc.constants.m_n) / (L1 + L2) + return out.to(unit='angstrom', copy=False) + + +def add_common_coords_and_masks( + da: RawDetectorData[RunType], + ylim: YIndexLimits, + bdlim: BeamDivergenceLimits, +) -> ReducibleData[RunType]: + da.bins.coords["wavelength"] = wavelength( + event_time_offset=da.bins.coords["event_time_offset"], + divergence_angle=da.coords["pixel_divergence_angle"], + L1=da.coords["L1"], + 
L2=da.coords["L2"],
+        chopper_phase=da.coords["chopper_phase"],
+        chopper_frequency=da.coords["chopper_frequency"],
+    )
+    da.bins.coords["angle_of_reflection"] = angle_of_reflection(
+        wavelength=da.bins.coords["wavelength"],
+        divergence_angle=da.coords["pixel_divergence_angle"],
+        L2=da.coords["L2"],
+        sample_rotation=da.coords["sample_rotation"],
+        detector_rotation=da.coords["detector_rotation"],
+    )
+    da.bins.coords["angle_of_divergence"] = angle_of_divergence(
+        angle_of_reflection=da.bins.coords["angle_of_reflection"],
+        sample_rotation=da.coords["sample_rotation"],
+    )
+    da.masks["stripe_range"] = (da.coords["stripe"] < ylim[0]) | (
+        da.coords["stripe"] > ylim[1]
+    )
+    da.bins.masks["divergence_too_large"] = (
+        da.bins.coords["angle_of_divergence"]
+        < bdlim[0].to(unit=da.bins.coords["angle_of_divergence"].bins.unit)
+    ) | (
+        da.bins.coords["angle_of_divergence"]
+        > bdlim[1].to(unit=da.bins.coords["angle_of_divergence"].bins.unit)
+    )
+    # Footprint correction
+    da /= sc.sin(da.bins.coords['angle_of_reflection'])
+    return da
+
+
+providers = (add_common_coords_and_masks,)
diff --git a/src/ess/amor/figures.py b/src/ess/amor/figures.py
index bc7d7c7b..e254de78 100644
--- a/src/ess/amor/figures.py
+++ b/src/ess/amor/figures.py
@@ -6,8 +6,8 @@
 
 from ess.reflectometry.types import (
     QBins,
-    ReflectivityData,
     ReflectivityOverQ,
+    ReflectivityOverZW,
     SampleRun,
 )
@@ -18,7 +18,7 @@
     WavelengthThetaFigure,
     WavelengthZIndexFigure,
 )
-from .utils import theta_grid
+from .utils import angle_of_reflection_grid
 
 
 def _reshape_array_to_expected_shape(da, dims, **bins):
@@ -52,38 +52,41 @@ def _repeat_variable_argument(n, arg):
     )
 
 
-def _try_to_create_theta_grid_if_missing(bins, da):
+def _try_to_create_angle_of_reflection_grid_if_missing(bins, da):
     if (
-        'theta' not in bins
-        and 'theta' not in da.dims
+        'angle_of_reflection' not in bins
+        and 'angle_of_reflection' not in da.dims
         and 'sample_rotation' in da.coords
         and 'detector_rotation' in da.coords
     ):
-        bins['theta'] = theta_grid(
+        bins['angle_of_reflection'] = angle_of_reflection_grid(
             nu=da.coords['detector_rotation'], mu=da.coords['sample_rotation']
         )
 
 
-def wavelength_theta_figure(
+def wavelength_angle_of_reflection_figure(
     da: sc.DataArray | Sequence[sc.DataArray],
     *,
     wavelength_bins: (sc.Variable | None) | Sequence[sc.Variable | None] = None,
-    theta_bins: (sc.Variable | None) | Sequence[sc.Variable | None] = None,
+    angle_of_reflection_bins: (sc.Variable | None)
+    | Sequence[sc.Variable | None] = None,
     q_edges_to_display: Sequence[sc.Variable] = (),
     linewidth: float = 1.0,
     **kwargs,
 ):
     '''
-    Creates a figure displaying a histogram over :math:`\\theta` and :math:`\\lambda`.
+    Creates a figure displaying a histogram over the angle of reflection
+    and :math:`\\lambda`.
 
     The input can either be a single data array containing the data to display,
     or a sequence of data arrays.
 
-    The inputs must either have coordinates called "theta" and "wavelength",
-    or they must be histograms with dimensions "theta" and "wavelength".
+    The inputs must either have coordinates called "angle_of_reflection"
+    and "wavelength", or they must be histograms with dimensions
+    "angle_of_reflection" and "wavelength".
 
-    If :code:`wavelength_bins` or :code:`theta_bins` are provided, they are used
-    to construct the histogram. If not provided, the function uses the
+    If :code:`wavelength_bins` or :code:`angle_of_reflection_bins` are provided,
+    they are used to construct the histogram. If not provided, the function uses the
     bin edges that already exist on the data arrays.
 
     If :code:`q_edges_to_display` is provided, lines will be drawn in the figure
@@ -95,8 +98,8 @@ def wavelength_theta_figure(
         Data arrays to display.
     wavelength_bins : array-like, optional
         Bins used to histogram the data in wavelength.
-    theta_bins : array-like, optional
-        Bins used to histogram the data in theta.
+    angle_of_reflection_bins : array-like, optional
+        Bins used to histogram the data in angle_of_reflection.
     q_edges_to_display : sequence of float, optional
         Values of :math:`Q` to be displayed as straight lines in the figure.
     linewidth : float, optional
@@ -111,39 +114,44 @@
 
     '''
     if isinstance(da, sc.DataArray):
-        return wavelength_theta_figure(
+        return wavelength_angle_of_reflection_figure(
             (da,),
             wavelength_bins=(wavelength_bins,),
-            theta_bins=(theta_bins,),
+            angle_of_reflection_bins=(angle_of_reflection_bins,),
             q_edges_to_display=q_edges_to_display,
             **kwargs,
         )
 
-    wavelength_bins, theta_bins = (
-        _repeat_variable_argument(len(da), arg) for arg in (wavelength_bins, theta_bins)
+    wavelength_bins, angle_of_reflection_bins = (
+        _repeat_variable_argument(len(da), arg)
+        for arg in (wavelength_bins, angle_of_reflection_bins)
     )
 
     hs = []
-    for d, wavelength_bin, theta_bin in zip(
-        da, wavelength_bins, theta_bins, strict=True
+    for d, wavelength_bin, angle_of_reflection_bin in zip(
+        da, wavelength_bins, angle_of_reflection_bins, strict=True
     ):
         bins = {}
         if wavelength_bin is not None:
             bins['wavelength'] = wavelength_bin
-        if theta_bin is not None:
-            bins['theta'] = theta_bin
+        if angle_of_reflection_bin is not None:
+            bins['angle_of_reflection'] = angle_of_reflection_bin
 
-        _try_to_create_theta_grid_if_missing(bins, d)
+        _try_to_create_angle_of_reflection_grid_if_missing(bins, d)
 
-        hs.append(_reshape_array_to_expected_shape(d, ('theta', 'wavelength'), **bins))
+        hs.append(
+            _reshape_array_to_expected_shape(
+                d, ('angle_of_reflection', 'wavelength'), **bins
+            )
+        )
 
     kwargs.setdefault('cbar', True)
     kwargs.setdefault('norm', 'log')
     p = pp.imagefigure(*(pp.Node(h) for h in hs), **kwargs)
     for q in q_edges_to_display:
-        thmax = max(h.coords["theta"].max() for h in hs)
+        thmax = max(h.coords["angle_of_reflection"].max() for h in hs)
         p.ax.plot(
             [0.0, 4 * np.pi * (sc.sin(thmax) / q).value],
             [0.0, thmax.value],
@@ -155,23 +163,25 @@
     return p
 
 
-def q_theta_figure(
+def q_angle_of_reflection_figure(
     da: sc.DataArray | Sequence[sc.DataArray],
     *,
     q_bins: (sc.Variable | None) | Sequence[sc.Variable | None] = None,
-    theta_bins: (sc.Variable | None) | Sequence[sc.Variable | None] = None,
+    angle_of_reflection_bins: (sc.Variable | None)
+    | Sequence[sc.Variable | None] = None,
     **kwargs,
 ):
     '''
-    Creates a figure displaying a histogram over :math:`\\theta` and :math:`Q`.
+    Creates a figure displaying a histogram over the angle of reflection
+    and :math:`Q`.
 
     The input can either be a single data array containing the data to display,
     or a sequence of data arrays.
 
-    The inputs must either have coordinates called "theta" and "Q",
-    or they must be histograms with dimensions "theta" and "Q".
+    The inputs must either have coordinates called "angle_of_reflection" and "Q",
+    or they must be histograms with dimensions "angle_of_reflection" and "Q".
 
-    If :code:`theta_bins` or :code:`q_bins` are provided, they are used
+    If :code:`angle_of_reflection_bins` or :code:`q_bins` are provided, they are used
    to construct the histogram.
If not provided, the function uses the bin edges that already exist on the data arrays. @@ -181,8 +191,8 @@ def q_theta_figure( Data arrays to display. q_bins : array-like, optional Bins used to histogram the data in Q. - theta_bins : array-like, optional - Bins used to histogram the data in theta. + angle_of_reflection_bins : array-like, optional + Bins used to histogram the data in angle_of_reflection. Returns ------- @@ -190,27 +200,35 @@ def q_theta_figure( ''' if isinstance(da, sc.DataArray): - return q_theta_figure( - (da,), q_bins=(q_bins,), theta_bins=(theta_bins,), **kwargs + return q_angle_of_reflection_figure( + (da,), + q_bins=(q_bins,), + angle_of_reflection_bins=(angle_of_reflection_bins,), + **kwargs, ) - q_bins, theta_bins = ( - _repeat_variable_argument(len(da), arg) for arg in (q_bins, theta_bins) + q_bins, angle_of_reflection_bins = ( + _repeat_variable_argument(len(da), arg) + for arg in (q_bins, angle_of_reflection_bins) ) hs = [] - for d, q_bin, theta_bin in zip(da, q_bins, theta_bins, strict=True): + for d, q_bin, angle_of_reflection_bin in zip( + da, q_bins, angle_of_reflection_bins, strict=True + ): bins = {} if q_bin is not None: bins['Q'] = q_bin - if theta_bin is not None: - bins['theta'] = theta_bin + if angle_of_reflection_bin is not None: + bins['angle_of_reflection'] = angle_of_reflection_bin - _try_to_create_theta_grid_if_missing(bins, d) + _try_to_create_angle_of_reflection_grid_if_missing(bins, d) - hs.append(_reshape_array_to_expected_shape(d, ('theta', 'Q'), **bins)) + hs.append( + _reshape_array_to_expected_shape(d, ('angle_of_reflection', 'Q'), **bins) + ) kwargs.setdefault('cbar', True) kwargs.setdefault('norm', 'log') @@ -272,23 +290,25 @@ def wavelength_z_figure( return pp.imagefigure(*(pp.Node(h) for h in hs), **kwargs) -def wavelength_theta_diagnostic_figure( - da: ReflectivityData, +def wavelength_angle_of_reflection_diagnostic_figure( + da: ReflectivityOverZW, thbins: ThetaBins[SampleRun], ) -> WavelengthThetaFigure: - return wavelength_theta_figure(da, theta_bins=thbins) + return wavelength_angle_of_reflection_figure(da, angle_of_reflection_bins=thbins) -def q_theta_diagnostic_figure( - da: ReflectivityData, +def q_angle_of_reflection_diagnostic_figure( + da: ReflectivityOverZW, thbins: ThetaBins[SampleRun], qbins: QBins, ) -> QThetaFigure: - return q_theta_figure(da, q_bins=qbins, theta_bins=thbins) + return q_angle_of_reflection_figure( + da, q_bins=qbins, angle_of_reflection_bins=thbins + ) def wavelength_z_diagnostic_figure( - da: ReflectivityData, + da: ReflectivityOverZW, ) -> WavelengthZIndexFigure: return wavelength_z_figure(da) @@ -305,7 +325,7 @@ def diagnostic_view( providers = ( wavelength_z_diagnostic_figure, - wavelength_theta_diagnostic_figure, - q_theta_diagnostic_figure, + wavelength_angle_of_reflection_diagnostic_figure, + q_angle_of_reflection_diagnostic_figure, diagnostic_view, ) diff --git a/src/ess/amor/geometry.py b/src/ess/amor/geometry.py index 6134b44c..55d576c8 100644 --- a/src/ess/amor/geometry.py +++ b/src/ess/amor/geometry.py @@ -26,49 +26,44 @@ class Detector: distance = sc.scalar(4000, unit="mm") -def _pixel_coordinate_in_detector_system( - pixelID: sc.Variable, -) -> tuple[sc.Variable, sc.Variable]: +def pixel_coordinates_in_detector_system() -> tuple[sc.Variable, sc.Variable]: """Determines beam travel distance inside the detector and the beam divergence angle from the detector number.""" - (bladeNr, bPixel) = ( - pixelID // (Detector.nWires * Detector.nStripes), - pixelID % (Detector.nWires * 
Detector.nStripes), + pixels = sc.DataArray( + sc.arange( + 'row', + 1, + ( + Detector.nBlades * Detector.nWires * Detector.nStripes + sc.scalar(1) + ).values, + unit=None, + ).fold( + 'row', + sizes={ + 'blade': Detector.nBlades, + 'wire': Detector.nWires, + 'stripe': Detector.nStripes, + }, + ), + coords={ + 'blade': sc.arange('blade', sc.scalar(0), Detector.nBlades), + 'wire': sc.arange('wire', sc.scalar(0), Detector.nWires), + 'stripe': sc.arange('stripe', sc.scalar(0), Detector.nStripes), + }, ) - # z index on blade, y index on detector - bZi = bPixel // Detector.nStripes # x position in detector # TODO: check with Jochen if this is correct, as old code was: # detX = bZi * Detector.dX - distance_inside_detector = (Detector.nWires - 1 - bZi) * Detector.dX - - bladeAngle = (2.0 * sc.asin(0.5 * Detector.bladeZ / Detector.distance)).to( - unit="degree" - ) - beam_divergence_angle = (Detector.nBlades / 2.0 - bladeNr) * bladeAngle - ( - sc.atan(bZi * Detector.dZ / (Detector.distance + bZi * Detector.dX)) - ).to(unit="degree") - return distance_inside_detector, beam_divergence_angle - - -def pixel_coordinate_in_lab_frame( - pixelID: sc.Variable, nu: sc.Variable -) -> tuple[sc.Variable, sc.Variable]: - """Computes spatial coordinates (lab reference frame), and the beam divergence - angle for the detector pixel associated with `pixelID`""" - distance_in_detector, divergence_angle = _pixel_coordinate_in_detector_system( - pixelID - ) - - angle_to_horizon = (nu + divergence_angle).to(unit="rad") - distance_to_pixel = distance_in_detector + Detector.distance - - global_Y = distance_to_pixel * sc.sin(angle_to_horizon) - global_Z = distance_to_pixel * sc.cos(angle_to_horizon) - # TODO: the values for global_X are right now just an estimate. We should check with - # the instrument scientist what the actual values are. The X positions are ignored - # in the coordinate transformation, so this is not critical. 
- global_X = sc.zeros_like(global_Z) + sc.linspace( - "stripe", -0.1, 0.1, global_Z.sizes["stripe"], unit="m" - ).to(unit=global_Z.unit) - return sc.spatial.as_vectors(global_X, global_Y, global_Z), divergence_angle + pixels.coords['distance_in_detector'] = ( + Detector.nWires - 1 - pixels.coords['wire'] + ) * Detector.dX + bladeAngle = 2.0 * sc.asin(0.5 * Detector.bladeZ / Detector.distance) + pixels.coords['pixel_divergence_angle'] = ( + (Detector.nBlades / 2.0 - pixels.coords['blade']) * bladeAngle + - sc.atan( + pixels.coords['wire'] + * Detector.dZ + / (Detector.distance + pixels.coords['wire'] * Detector.dX) + ) + ).to(unit='rad') + return pixels diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index bc0cbcb2..4fd80e75 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -2,6 +2,8 @@ # Copyright (c) 2023 Scipp contributors (https://github.com/scipp) import scipp as sc +from ess.reduce import nexus + from ..reflectometry.load import load_nx from ..reflectometry.types import ( DetectorRotation, @@ -9,16 +11,15 @@ LoadedNeXusDetector, NeXusDetectorName, RawDetectorData, - ReducibleDetectorData, RunType, SampleRotation, ) -from .geometry import Detector, pixel_coordinate_in_lab_frame +from .geometry import Detector, pixel_coordinates_in_detector_system from .types import ( - Chopper1Position, - Chopper2Position, + ChopperDistance, ChopperFrequency, ChopperPhase, + ChopperSeparation, RawChopper, ) @@ -26,102 +27,55 @@ def load_detector( file_path: Filename[RunType], detector_name: NeXusDetectorName[RunType] ) -> LoadedNeXusDetector[RunType]: - return next(load_nx(file_path, f"NXentry/NXinstrument/{detector_name}")) + return nexus.load_detector(file_path=file_path, detector_name=detector_name) def load_events( - detector: LoadedNeXusDetector[RunType], detector_rotation: DetectorRotation[RunType] + detector: LoadedNeXusDetector[RunType], + detector_rotation: DetectorRotation[RunType], + sample_rotation: SampleRotation[RunType], + chopper_phase: ChopperPhase[RunType], + chopper_frequency: ChopperFrequency[RunType], + chopper_distance: ChopperDistance[RunType], ) -> RawDetectorData[RunType]: - detector_numbers = sc.arange( - "event_id", - start=1, - stop=(Detector.nBlades * Detector.nWires * Detector.nStripes).value + 1, - unit=None, - dtype="int32", - ) + detector_numbers = pixel_coordinates_in_detector_system() data = ( - detector['data'] + nexus.extract_detector_data(detector) .bins.constituents["data"] - .group(detector_numbers) - .fold( - "event_id", - sizes={ - "blade": Detector.nBlades, - "wire": Detector.nWires, - "stripe": Detector.nStripes, - }, - ) + .group(detector_numbers.data.flatten(to='event_id')) + .fold("event_id", sizes=detector_numbers.sizes) ) - # Recent versions of scippnexus no longer add variances for events by default, so - # we add them here if they are missing. 
+ for name, coord in detector_numbers.coords.items(): + data.coords[name] = coord + + data.coords['iz'] = Detector.nWires * data.coords['blade'] + data.coords['wire'] + if data.bins.constituents["data"].data.variances is None: data.bins.constituents["data"].data.variances = data.bins.constituents[ "data" ].data.values - pixel_inds = sc.array(dims=data.dims, values=data.coords["event_id"].values - 1) - position, angle_from_center_of_beam = pixel_coordinate_in_lab_frame( - pixelID=pixel_inds, nu=detector_rotation - ) - data.coords["position"] = position.to(unit="m", copy=False) - data.coords["angle_from_center_of_beam"] = angle_from_center_of_beam + data.coords["sample_rotation"] = sample_rotation.to(unit='rad') + data.coords["detector_rotation"] = detector_rotation.to(unit='rad') + data.coords["chopper_phase"] = chopper_phase + data.coords["chopper_frequency"] = chopper_frequency + data.coords["L1"] = sc.abs(chopper_distance) + data.coords["L2"] = data.coords['distance_in_detector'] + Detector.distance return RawDetectorData[RunType](data) -def compute_tof( - data: RawDetectorData[RunType], - phase: ChopperPhase[RunType], - frequency: ChopperFrequency[RunType], -) -> ReducibleDetectorData[RunType]: - data.bins.coords["tof"] = data.bins.coords.pop("event_time_offset").to( - unit="ns", dtype="float64", copy=False - ) - - tof_unit = data.bins.coords["tof"].bins.unit - tau = sc.to_unit(1 / (2 * frequency), tof_unit) - tof_offset = tau * phase / (180.0 * sc.units.deg) - - event_time_offset = data.bins.coords["tof"] - - minimum = -tof_offset - frame_bound = tau - tof_offset - maximum = 2 * tau - tof_offset - - offset = sc.where( - (minimum < event_time_offset) & (event_time_offset < frame_bound), - tof_offset, - sc.where( - (frame_bound < event_time_offset) & (event_time_offset < maximum), - tof_offset - tau, - 0.0 * tof_unit, - ), - ) - data.bins.masks["outside_of_pulse"] = (minimum > event_time_offset) | ( - event_time_offset > maximum - ) - data.bins.coords["tof"] += offset - data.bins.coords["tof"] -= ( - data.coords["angle_from_center_of_beam"].to(unit="deg") / (180.0 * sc.units.deg) - ) * tau - return ReducibleDetectorData[RunType](data) - - def amor_chopper(f: Filename[RunType]) -> RawChopper[RunType]: return next(load_nx(f, "NXentry/NXinstrument/NXdisk_chopper")) -def load_amor_chopper_1_position(ch: RawChopper[RunType]) -> Chopper1Position[RunType]: +def load_amor_chopper_distance(ch: RawChopper[RunType]) -> ChopperDistance[RunType]: # We know the value has unit 'mm' - return sc.vector([0, 0, ch["distance"] - ch["pair_separation"] / 2], unit="mm").to( - unit="m" - ) + return sc.scalar(ch["distance"], unit="mm").to(unit="mm") -def load_amor_chopper_2_position(ch: RawChopper[RunType]) -> Chopper2Position[RunType]: +def load_amor_chopper_separation(ch: RawChopper[RunType]) -> ChopperSeparation[RunType]: # We know the value has unit 'mm' - return sc.vector([0, 0, ch["distance"] + ch["pair_separation"] / 2], unit="mm").to( - unit="m" - ) + return sc.scalar(ch["pair_separation"], unit="mm").to(unit="mm") def load_amor_ch_phase(ch: RawChopper[RunType]) -> ChopperPhase[RunType]: @@ -157,11 +111,10 @@ def load_amor_detector_rotation(fp: Filename[RunType]) -> DetectorRotation[RunTy providers = ( load_detector, load_events, - compute_tof, load_amor_ch_frequency, load_amor_ch_phase, - load_amor_chopper_1_position, - load_amor_chopper_2_position, + load_amor_chopper_distance, + load_amor_chopper_separation, load_amor_sample_rotation, load_amor_detector_rotation, amor_chopper, diff --git 
a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py new file mode 100644 index 00000000..02dba9f4 --- /dev/null +++ b/src/ess/amor/normalization.py @@ -0,0 +1,120 @@ +import scipp as sc + +from ..reflectometry.conversions import reflectometry_q +from ..reflectometry.supermirror import ( + Alpha, + CriticalEdge, + MValue, + supermirror_reflectivity, +) +from ..reflectometry.types import ( + QBins, + QResolution, + ReducedReference, + ReducibleData, + Reference, + ReferenceRun, + ReflectivityOverQ, + ReflectivityOverZW, + SampleRun, + WavelengthBins, + ZIndexLimits, +) +from .conversions import angle_of_reflection + + +def _add_pre_reduction_masks(da, zindex_limits): + da.masks['z_range'] = (da.coords["iz"] < zindex_limits[0]) | ( + da.coords["iz"] > zindex_limits[1] + ) + + +def reduce_reference( + reference: ReducibleData[ReferenceRun], + wavelength_bins: WavelengthBins, + critical_edge: CriticalEdge, + mvalue: MValue, + alpha: Alpha, +) -> ReducedReference: + R = supermirror_reflectivity( + reflectometry_q( + wavelength=reference.bins.coords['wavelength'], + angle_of_reflection=reference.bins.coords['angle_of_reflection'], + ), + c=critical_edge, + m=mvalue, + alpha=alpha, + ) + reference.bins.masks['invalid'] = sc.isnan(R) + reference /= R + return reference.bins.concat(('stripe',)).hist(wavelength=wavelength_bins) + + +def reduce_sample_over_q( + sample: ReducibleData[SampleRun], + reference: Reference, + qbins: QBins, + wavelength_bins: WavelengthBins, + zlims: ZIndexLimits, + qresolution: QResolution, +) -> ReflectivityOverQ: + sample.bins.coords['Q'] = reflectometry_q( + sample.bins.coords['wavelength'], + sample.bins.coords['angle_of_reflection'], + ) + sample.bins.masks['wavelength'] = ( + wavelength_bins[0] > sample.bins.coords['wavelength'] + ) | (wavelength_bins[-1] < sample.bins.coords['wavelength']) + _add_pre_reduction_masks(sample, zlims) + R = sample.bins.concat().bin(Q=qbins) / reference.flatten(to='Q').hist(Q=qbins).data + R.coords['Q_resolution'] = qresolution.data + return R + + +def reduce_sample_over_zw( + sample: ReducibleData[SampleRun], + reference: Reference, + wavelength_bins: WavelengthBins, + zlims: ZIndexLimits, +) -> ReflectivityOverZW: + sample.bins.coords['Q'] = reflectometry_q( + sample.bins.coords['wavelength'], + sample.bins.coords['angle_of_reflection'], + ) + sample.bins.masks['wavelength'] = ( + wavelength_bins[0] > sample.bins.coords['wavelength'] + ) | (wavelength_bins[-1] < sample.bins.coords['wavelength']) + _add_pre_reduction_masks(sample, zlims) + R = sample.bins.concat(('stripe',)).bin(wavelength=wavelength_bins) / reference.data + R.masks["too_few_events"] = reference.data < sc.scalar(1, unit="counts") + return R + + +def evaluate_reference( + reference: ReducedReference, + sample: ReducibleData[SampleRun], + qbins: QBins, + zlims: ZIndexLimits, +) -> Reference: + ref = reference.copy() + ref.coords['angle_of_reflection'] = angle_of_reflection( + wavelength=sc.midpoints(ref.coords["wavelength"]), + divergence_angle=ref.coords["pixel_divergence_angle"], + L2=ref.coords["L2"], + sample_rotation=sample.coords["sample_rotation"], + detector_rotation=sample.coords["detector_rotation"], + ) + ref.coords['Q'] = reflectometry_q( + wavelength=sc.midpoints(ref.coords["wavelength"]), + angle_of_reflection=ref.coords["angle_of_reflection"], + ) + _add_pre_reduction_masks(ref, zlims) + return sc.values(ref) + + +providers = ( + reduce_reference, + reduce_sample_over_q, + reduce_sample_over_zw, + evaluate_reference, +) diff --git 
a/src/ess/amor/orso.py b/src/ess/amor/orso.py index 970f2a09..038d8283 100644 --- a/src/ess/amor/orso.py +++ b/src/ess/amor/orso.py @@ -26,7 +26,9 @@ def build_orso_instrument(events: ReflectivityOverQ) -> OrsoInstrument: return OrsoInstrument( orso_data_source.InstrumentSettings( wavelength=orso_base.ValueRange(*_limits_of_coord(events, "wavelength")), - incident_angle=orso_base.ValueRange(*_limits_of_coord(events, "theta")), + incident_angle=orso_base.ValueRange( + *_limits_of_coord(events, "angle_of_reflection") + ), polarization=None, # TODO how can we determine this from the inputs? ) ) diff --git a/src/ess/amor/resolution.py b/src/ess/amor/resolution.py index 5c395e98..a1debe1a 100644 --- a/src/ess/amor/resolution.py +++ b/src/ess/amor/resolution.py @@ -2,60 +2,82 @@ # Copyright (c) 2023 Scipp contributors (https://github.com/scipp) import scipp as sc -from ..reflectometry.tools import fwhm_to_std from ..reflectometry.types import ( DetectorSpatialResolution, - FootprintCorrectedData, QBins, QResolution, + Reference, SampleRun, SampleSize, ) from .types import ( AngularResolution, - Chopper1Position, - Chopper2Position, + ChopperSeparation, SampleSizeResolution, WavelengthResolution, ) +_STD_TO_FWHM = sc.scalar(2.0) * sc.sqrt(sc.scalar(2.0) * sc.log(sc.scalar(2.0))) + + +def fwhm_to_std(fwhm: sc.Variable) -> sc.Variable: + """ + Convert from full-width half maximum to standard deviation. + + Parameters + ---------- + fwhm: + Full-width half maximum. + + Returns + ------- + : + Standard deviation. + """ + # Enables the conversion from full width half + # maximum to standard deviation + return fwhm / _STD_TO_FWHM + def wavelength_resolution( - da: FootprintCorrectedData[SampleRun], - chopper_1_position: Chopper1Position[SampleRun], - chopper_2_position: Chopper2Position[SampleRun], -) -> WavelengthResolution: + L1, + L2, + chopper_separation, +): """ Find the wavelength resolution contribution as described in Section 4.3.3 of the Amor publication (doi: 10.1016/j.nima.2016.03.007). Parameters ---------- - chopper_1_position: - Position of first chopper (the one closer to the source). - chopper_2_position: - Position of second chopper (the one closer to the sample). - pixel_position: - Positions for detector pixels. + + L1: + Distance from midpoint between choppers to sample. + L2: + Distance from sample to detector. + chopper_separation: + Distance between choppers. Returns ------- : The angular resolution variable, as standard deviation. 
""" - pixel_position = da.coords["position"] - chopper_midpoint = (chopper_1_position + chopper_2_position) * sc.scalar(0.5) - distance_between_choppers = sc.norm(chopper_2_position - chopper_1_position) - chopper_detector_distance = sc.norm(pixel_position - chopper_midpoint) - return WavelengthResolution( - fwhm_to_std(distance_between_choppers / chopper_detector_distance) + return fwhm_to_std(chopper_separation / (L1 + L2)) + + +def _wavelength_resolution( + da: Reference, chopper_separation: ChopperSeparation[SampleRun] +) -> WavelengthResolution: + return wavelength_resolution( + L1=da.coords['L1'], L2=da.coords['L2'], chopper_separation=chopper_separation ) def sample_size_resolution( - da: FootprintCorrectedData[SampleRun], - sample_size: SampleSize[SampleRun], -) -> SampleSizeResolution: + L2, + sample_size, +): """ The resolution from the projected sample size, where it may be bigger than the detector pixel resolution as described in Section 4.3.3 of the Amor @@ -63,8 +85,8 @@ def sample_size_resolution( Parameters ---------- - pixel_position: - Positions for detector pixels. + L2: + Distance from sample to detector. sample_size: Size of sample. @@ -73,30 +95,30 @@ def sample_size_resolution( : Standard deviation of contribution from the sample size. """ - return fwhm_to_std( - sc.to_unit(sample_size, "m") - / sc.to_unit( - sc.norm(da.coords["position"] - da.coords["sample_position"]), - "m", - copy=False, - ) - ) + return fwhm_to_std(sample_size / L2.to(unit=sample_size.unit)) + + +def _sample_size_resolution( + da: Reference, sample_size: SampleSize[SampleRun] +) -> SampleSizeResolution: + return sample_size_resolution(L2=da.coords['L2'], sample_size=sample_size) def angular_resolution( - da: FootprintCorrectedData[SampleRun], - detector_spatial_resolution: DetectorSpatialResolution[SampleRun], -) -> AngularResolution: + angle_of_reflection, + L2, + detector_spatial_resolution, +): """ Determine the angular resolution as described in Section 4.3.3 of the Amor publication (doi: 10.1016/j.nima.2016.03.007). Parameters ---------- - pixel_position: - Positions for detector pixels. - theta: - Theta values for events. + angle_of_reflection: + Angle of reflection. + L2: + Distance between sample and detector. detector_spatial_resolution: FWHM of detector pixel resolution. @@ -105,28 +127,29 @@ def angular_resolution( : Angular resolution standard deviation """ - theta = da.bins.coords["theta"] return ( fwhm_to_std( - sc.to_unit( - sc.atan( - sc.to_unit(detector_spatial_resolution, "m") - / sc.to_unit( - sc.norm(da.coords["position"] - da.coords["sample_position"]), - "m", - copy=False, - ) - ), - theta.bins.unit, - copy=False, + sc.atan( + detector_spatial_resolution + / L2.to(unit=detector_spatial_resolution.unit) ) - ) - / theta + ).to(unit=angle_of_reflection.unit) + / angle_of_reflection + ) + + +def _angular_resolution( + da: Reference, detector_spatial_resolution: DetectorSpatialResolution[SampleRun] +) -> AngularResolution: + return angular_resolution( + angle_of_reflection=da.coords['angle_of_reflection'], + L2=da.coords['L2'], + detector_spatial_resolution=detector_spatial_resolution, ) def sigma_Q( - da: FootprintCorrectedData[SampleRun], + ref: Reference, angular_resolution: AngularResolution, wavelength_resolution: WavelengthResolution, sample_size_resolution: SampleSizeResolution, @@ -151,21 +174,26 @@ def sigma_Q( : Combined resolution function. 
""" - h = da.bins.concat().hist(Q=qbins) + h = ref.flatten(to='Q').hist(Q=qbins) s = ( ( - da + ref * ( angular_resolution**2 + wavelength_resolution**2 + sample_size_resolution**2 ) - * da.bins.coords['Q'] ** 2 + * ref.coords['Q'] ** 2 ) - .bins.concat() + .flatten(to='Q') .hist(Q=qbins) ) return sc.sqrt(sc.values(s / h)) -providers = (sigma_Q, angular_resolution, wavelength_resolution, sample_size_resolution) +providers = ( + sigma_Q, + _angular_resolution, + _wavelength_resolution, + _sample_size_resolution, +) diff --git a/src/ess/amor/types.py b/src/ess/amor/types.py index c4bb3039..e9b5481e 100644 --- a/src/ess/amor/types.py +++ b/src/ess/amor/types.py @@ -18,21 +18,21 @@ class ChopperPhase(sciline.Scope[RunType, sc.Variable], sc.Variable): """Phase of the choppers in the run.""" -class Chopper1Position(sciline.Scope[RunType, sc.Variable], sc.Variable): - """Position of the first chopper relative the source of the beam.""" +class ChopperDistance(sciline.Scope[RunType, sc.Variable], sc.Variable): + """Distance from the midpoint between the two choppers to the sample.""" -class Chopper2Position(sciline.Scope[RunType, sc.Variable], sc.Variable): - """Position of the second chopper relative to the source of the beam.""" +class ChopperSeparation(sciline.Scope[RunType, sc.Variable], sc.Variable): + """Distance between the two choppers.""" class RawChopper(sciline.Scope[RunType, sc.DataGroup], sc.DataGroup): - """Chopper data loaded from nexus file""" + """Chopper data loaded from nexus file.""" class ThetaBins(sciline.Scope[RunType, sc.Variable], sc.Variable): """Binning in theta that takes into consideration that some - detector pixels have the same theta value""" + detector pixels have the same theta value.""" WavelengthThetaFigure = NewType("WavelengthThetaFigure", Any) diff --git a/src/ess/amor/utils.py b/src/ess/amor/utils.py index aa36f944..a05accb2 100644 --- a/src/ess/amor/utils.py +++ b/src/ess/amor/utils.py @@ -10,19 +10,20 @@ from .types import ThetaBins -def theta_grid( +def angle_of_reflection_grid( nu: DetectorRotation[RunType], mu: SampleRotation[RunType] ) -> ThetaBins[RunType]: - """Special grid used to create intensity maps over (theta, wavelength). + """Special grid used to create intensity maps over + (angle_of_reflection, wavelength). 
The grid avoids aliasing artifacts that occur if the - theta bins overlap the blade edges.""" + angle_of_reflection bins overlap the blade edges.""" # angular offset of two blades: bladeAngle = 2.0 * sc.asin(0.5 * Detector.bladeZ / Detector.distance) # associate an angle with each z-coordinate on one blade blade_grid = sc.atan( - sc.arange("theta", 0, 33) + sc.arange("angle_of_reflection", 0, 33) * Detector.dZ - / (Detector.distance + sc.arange("theta", 0, 33) * Detector.dX) + / (Detector.distance + sc.arange("angle_of_reflection", 0, 33) * Detector.dX) ) # approximate angular step width on one blade stepWidth = blade_grid[1] - blade_grid[0] @@ -30,19 +31,22 @@ def theta_grid( blade_grid = blade_grid - 0.2 * stepWidth delta_grid = sc.array( - dims=["theta"], values=[], unit=blade_grid.unit, dtype=blade_grid.dtype + dims=["angle_of_reflection"], + values=[], + unit=blade_grid.unit, + dtype=blade_grid.dtype, ) # loop over all blades but one: for _ in range(Detector.nBlades.value - 1): # append the actual blade's grid to the array of detector-local angles - delta_grid = sc.concat((delta_grid, blade_grid), "theta") + delta_grid = sc.concat((delta_grid, blade_grid), "angle_of_reflection") # shift the blade grid by the angular offset blade_grid = blade_grid + bladeAngle # remove all entries in the detector local grid which are above the # expected next value (plus some space to avoid very thin bins) delta_grid = delta_grid[delta_grid < blade_grid[0] - 0.5 * stepWidth] # append the grid of the last blade. - delta_grid = sc.concat((delta_grid, blade_grid), "theta") + delta_grid = sc.concat((delta_grid, blade_grid), "angle_of_reflection") # add angular position of the detector grid = ( @@ -53,11 +57,7 @@ def theta_grid( ).to(unit="rad") + 0.5 * Detector.nBlades * bladeAngle.to(unit="rad") ) - # TODO: If theta filtering is added, use it here - # some filtering - # theta_grid = theta_grid[theta_grid>=thetaMin] - # theta_grid = theta_grid[theta_grid<=thetaMax] return grid -providers = (theta_grid,) +providers = (angle_of_reflection_grid,) diff --git a/src/ess/reflectometry/__init__.py b/src/ess/reflectometry/__init__.py index d0ec032d..6578fc76 100644 --- a/src/ess/reflectometry/__init__.py +++ b/src/ess/reflectometry/__init__.py @@ -4,19 +4,17 @@ import importlib.metadata -from . import calibrations, conversions, corrections, normalize, orso -from .load import load_reference, save_reference - try: __version__ = importlib.metadata.version("essreflectometry") except importlib.metadata.PackageNotFoundError: __version__ = "0.0.0" + +from . import conversions, orso +from .load import load_reference, save_reference + providers = ( *conversions.providers, - *corrections.providers, - *calibrations.providers, - *normalize.providers, *orso.providers, ) """ diff --git a/src/ess/reflectometry/calibrations.py b/src/ess/reflectometry/calibrations.py deleted file mode 100644 index 34f9d58e..00000000 --- a/src/ess/reflectometry/calibrations.py +++ /dev/null @@ -1,45 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2023 Scipp contributors (https://github.com/scipp) -import scipp as sc - -from . import supermirror -from .types import ReferenceIntensity - - -def calibration_factor( - da: ReferenceIntensity, - m_value: supermirror.MValue, - critical_edge: supermirror.CriticalEdge, - alpha: supermirror.Alpha, -) -> supermirror.SupermirrorReflectivityCorrection: - """ - Return the calibration factor for the supermirror. - - Parameters - ---------- - qbins: - edges of binning of Q. 
- m_value: - m-value for the supermirror. - critical_edge: - Supermirror critical edge. - alpha: - Supermirror alpha value. - - Returns - ------- - : - Calibration factor at the midpoint of each Q-bin. - """ - q = da.coords['Q'] - max_q = m_value * critical_edge - lim = (q <= critical_edge).astype(float) - lim.unit = 'one' - nq = 1.0 / (1.0 - alpha * (q - critical_edge)) - calibration_factor = sc.where( - q < max_q, lim + (1 - lim) * nq, sc.scalar(float('nan')) - ) - return calibration_factor - - -providers = (calibration_factor,) diff --git a/src/ess/reflectometry/conversions.py b/src/ess/reflectometry/conversions.py index 494699ee..6aef0278 100644 --- a/src/ess/reflectometry/conversions.py +++ b/src/ess/reflectometry/conversions.py @@ -2,99 +2,21 @@ # Copyright (c) 2023 Scipp contributors (https://github.com/scipp) import scipp as sc from scipp.constants import pi -from scippneutron._utils import elem_dtype, elem_unit -from scippneutron.conversion.beamline import scattering_angle_in_yz_plane -from scippneutron.conversion.graph import beamline, tof +from scippneutron._utils import elem_dtype -from .types import ( - BeamDivergenceLimits, - DataWithScatteringCoordinates, - DetectorRotation, - Gravity, - IncidentBeam, - MaskedData, - ReducibleDetectorData, - RunType, - SamplePosition, - SampleRotation, - SpecularReflectionCoordTransformGraph, - WavelengthBins, - YIndexLimits, - ZIndexLimits, -) - -def theta( - incident_beam: sc.Variable, - scattered_beam: sc.Variable, - wavelength: sc.Variable, - gravity: sc.Variable, - sample_rotation: sc.Variable, +def reflectometry_q( + wavelength: sc.Variable, angle_of_reflection: sc.Variable ) -> sc.Variable: - r"""Compute the scattering angle w.r.t. the sample plane. - - This function uses the definition given in - :func:`scippneutron.conversion.beamline.scattering_angle_in_yz_plane` - and includes the sample rotation :math:`\omega`: - - .. math:: - - \mathsf{tan}(\gamma) &= \frac{|y_d^{\prime}|}{z_d} \\ - \theta = \gamma - \omega - - with - - .. math:: - - y'_d = y_d + \frac{|g| m_n^2}{2 h^2} L_2^{\prime\, 2} \lambda^2 - - Attention - --------- - The above equation for :math:`y'_d` approximates :math:`L_2 \approx L'_2`. - See :func:`scippneutron.conversion.beamline.scattering_angle_in_yz_plane` - for more information. - - Parameters - ---------- - incident_beam: - Beam from source to sample. Expects ``dtype=vector3``. - scattered_beam: - Beam from sample to detector. Expects ``dtype=vector3``. - wavelength: - Wavelength of neutrons. - gravity: - Gravity vector. - sample_rotation: - The sample rotation angle :math:`\omega`. - - Returns - ------- - : - The polar scattering angle :math:`\theta`. """ - angle = scattering_angle_in_yz_plane( - incident_beam=incident_beam, - scattered_beam=scattered_beam, - wavelength=wavelength, - gravity=gravity, - ) - angle -= sample_rotation.to(unit=elem_unit(angle)) - return angle - - -def reflectometry_q(wavelength: sc.Variable, theta: sc.Variable) -> sc.Variable: - """ - Compute the Q vector from the theta angle computed as the difference - between gamma and omega. - Note that this is identical the 'normal' Q defined in scippneutron, except that - the `theta` angle is given as an input instead of `two_theta`. + Compute momentum transfer from reflection angle. Parameters ---------- wavelength: Wavelength values for the events. - theta: - Theta values, accounting for gravity. + angle_of_reflection: + Angle of reflection for the events. 
Returns ------- @@ -103,77 +25,7 @@ def reflectometry_q(wavelength: sc.Variable, theta: sc.Variable) -> sc.Variable: """ dtype = elem_dtype(wavelength) c = (4 * pi).astype(dtype) - return c * sc.sin(theta.astype(dtype, copy=False)) / wavelength - - -def specular_reflection( - incident_beam: IncidentBeam[RunType], - sample_position: SamplePosition[RunType], - sample_rotation: SampleRotation[RunType], - detector_rotation: DetectorRotation[RunType], - gravity: Gravity, -) -> SpecularReflectionCoordTransformGraph[RunType]: - """ - Generate a coordinate transformation graph for specular reflection reflectometry. - - Returns - ------- - : - Specular reflectometry graph. - """ - graph = { - **beamline.beamline(scatter=True), - **tof.elastic_wavelength("tof"), - "theta": theta, - "Q": reflectometry_q, - "incident_beam": lambda: incident_beam, - "sample_position": lambda: sample_position, - "sample_rotation": lambda: sample_rotation, - "detector_rotation": lambda: detector_rotation, - "gravity": lambda: gravity, - } - return SpecularReflectionCoordTransformGraph(graph) - - -def add_coords( - da: ReducibleDetectorData[RunType], - graph: SpecularReflectionCoordTransformGraph[RunType], -) -> DataWithScatteringCoordinates[RunType]: - da = da.transform_coords( - ["theta", "wavelength", "Q", "detector_rotation"], graph=graph - ) - da.coords.set_aligned('detector_rotation', False) - da.coords["z_index"] = sc.arange( - "row", 0, da.sizes["blade"] * da.sizes["wire"], unit=None - ).fold("row", sizes={dim: da.sizes[dim] for dim in ("blade", "wire")}) - da.coords["y_index"] = sc.arange("stripe", 0, da.sizes["stripe"], unit=None) - return da - - -def add_masks( - da: DataWithScatteringCoordinates[RunType], - ylim: YIndexLimits, - wb: WavelengthBins, - zlim: ZIndexLimits, - bdlim: BeamDivergenceLimits, -) -> MaskedData[RunType]: - da.masks["beam_divergence_too_large"] = ( - da.coords["angle_from_center_of_beam"] < bdlim[0] - ) | (da.coords["angle_from_center_of_beam"] > bdlim[1]) - da.masks["y_index_range"] = (da.coords["y_index"] < ylim[0]) | ( - da.coords["y_index"] > ylim[1] - ) - da.bins.masks["wavelength_mask"] = (da.bins.coords["wavelength"] < wb[0]) | ( - da.bins.coords["wavelength"] > wb[-1] - ) - da.masks["z_index_range"] = (da.coords["z_index"] < zlim[0]) | ( - da.coords["z_index"] > zlim[1] - ) - return da + return c * sc.sin(angle_of_reflection.astype(dtype, copy=False)) / wavelength -providers = ( - add_masks, - add_coords, - specular_reflection, -) +providers = () diff --git a/src/ess/reflectometry/corrections.py b/src/ess/reflectometry/corrections.py deleted file mode 100644 index 11d7486d..00000000 --- a/src/ess/reflectometry/corrections.py +++ /dev/null @@ -1,81 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2023 Scipp contributors (https://github.com/scipp) - -import scipp as sc - -from .supermirror import SupermirrorReflectivityCorrection -from .tools import fwhm_to_std -from .types import ( - BeamSize, - FootprintCorrectedData, - IdealReferenceIntensity, - MaskedData, - ReferenceIntensity, - ReferenceRun, - RunType, - SampleSize, - WavelengthBins, -) - - -def footprint_correction( - data_array: MaskedData[RunType], - beam_size: BeamSize[RunType], - sample_size: SampleSize[RunType], -) -> FootprintCorrectedData[RunType]: - """ - Corrects the event weights by the fraction of the beam hitting the sample. - Depends on :math:`\\theta`. - - Parameters - ---------- - data_array: - Data array to perform footprint correction on. - beam_size: - Full width half maximum of the beam. 
- sample_size: - Size of the sample. - TODO: check what sample size actually means. Is it the sample diameter? etc. - - Returns - ------- - : - Footprint corrected data array. - """ - size_of_beam_on_sample = beam_size / sc.sin(data_array.bins.coords["theta"]) - footprint_scale = sc.erf(fwhm_to_std(sample_size / size_of_beam_on_sample)) - data_array_fp_correction = data_array / footprint_scale - return FootprintCorrectedData[RunType](data_array_fp_correction) - - -def compute_reference_intensity( - da: FootprintCorrectedData[ReferenceRun], wb: WavelengthBins -) -> ReferenceIntensity: - """Creates a reference intensity map over (z_index, wavelength). - Rationale: - The intensity expressed in those variables should not vary - with the experiment parameters (such as sample rotation). - Therefore it can be used to normalize sample measurements. - """ - b = da.bin(wavelength=wb, dim=set(da.dims) - set(da.coords["z_index"].dims)) - h = b.hist() - h.masks["too_few_events"] = h.data < sc.scalar(1, unit="counts") - # Add a Q coordinate to each bin, the Q is not completely unique in every bin, - # but it is close enough. - h.coords["Q"] = b.bins.coords["Q"].bins.mean() - return ReferenceIntensity(h) - - -def calibrate_reference( - da: ReferenceIntensity, cal: SupermirrorReflectivityCorrection -) -> IdealReferenceIntensity: - """Calibrates the reference intensity by the - inverse of the supermirror reflectivity""" - return IdealReferenceIntensity(da * cal) - - -providers = ( - footprint_correction, - calibrate_reference, - compute_reference_intensity, -) diff --git a/src/ess/reflectometry/load.py b/src/ess/reflectometry/load.py index eec91fc9..b95fd812 100644 --- a/src/ess/reflectometry/load.py +++ b/src/ess/reflectometry/load.py @@ -5,7 +5,7 @@ import scipp as sc import scippnexus as snx -from .types import IdealReferenceIntensity, ReferenceFilePath +from .types import ReducedReference, ReferenceFilePath def load_nx(group: snx.Group | str, *paths: str): @@ -38,9 +38,9 @@ def _unique_child_group( def save_reference(pl: sciline.Pipeline, fname: str): - pl.compute(IdealReferenceIntensity).save_hdf5(fname) + pl.compute(ReducedReference).save_hdf5(fname) return fname -def load_reference(fname: ReferenceFilePath) -> IdealReferenceIntensity: +def load_reference(fname: ReferenceFilePath) -> ReducedReference: return sc.io.hdf5.load_hdf5(fname) diff --git a/src/ess/reflectometry/normalize.py b/src/ess/reflectometry/normalize.py deleted file mode 100644 index 18d56283..00000000 --- a/src/ess/reflectometry/normalize.py +++ /dev/null @@ -1,149 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2023 Scipp contributors (https://github.com/scipp) -import warnings - -import scipp as sc -from scipy.optimize import OptimizeWarning - -from .types import ( - FootprintCorrectedData, - IdealReferenceIntensity, - NormalizationFactor, - QBins, - QResolution, - ReflectivityData, - ReflectivityOverQ, - SampleRun, - WavelengthBins, -) - - -def normalization_factor( - da: FootprintCorrectedData[SampleRun], - corr: IdealReferenceIntensity, - wbins: WavelengthBins, -) -> NormalizationFactor: - """The correction matrix gives us the expected intensity at each - (z_index, wavelength) bin assuming the reflectivity is one. - To normalize the sample measurement we need to integrate the total - expected intensity in every Q-bin. - Note that Q refers to the 'sample-Q', different from the 'reference-Q'. - - The 'sample-Q' is computed taking the mean of the sample measurement Q - value in every (z_index, wavelength) bin. 
- One complication however is that some bins have zero intensity from the - sample measurement, so we are unable to assign a 'sample-Q' value to those bins. - Therefore we estimate the intensity in the missing bins by fitting the - 'sample-q' as a function of z_index and wavelength. - - Steps: - Approximate 'sample-q' in every (z_index, wavelength) bin - Fit 'sample-q'. - Compute 'sample-q' in all bins using the fit. - Return the reference intensity with the 'sample-q' as a coordinate. - - """ - sample_q = ( - da.bin(wavelength=wbins, dim=set(da.dims) - set(da.coords["z_index"].dims)) - .bins.coords["Q"] - .bins.mean() - ) - - wm = sc.midpoints(corr.coords["wavelength"]) - - def q_of_z_wavelength(wavelength, a, b): - return a + b / wavelength - - with warnings.catch_warnings(): - # `curve_fit` raises a warning if it fails to estimate variances. - # We don't care here because we only need the parameter values and anyway - # assume that the fit worked. - # The warning can be caused by there being too few points to estimate - # uncertainties because of masks. - warnings.filterwarnings( - "ignore", - message="Covariance of the parameters could not be estimated", - category=OptimizeWarning, - ) - p, _ = sc.curve_fit( - ["wavelength"], - q_of_z_wavelength, - sc.DataArray( - data=sample_q, - coords={"wavelength": wm}, - masks={ - **corr.masks, - "_sample_q_isnan": sc.isnan(sample_q), - }, - ), - p0={"a": sc.scalar(-1e-3, unit="1/angstrom")}, - ) - return sc.DataArray( - data=corr.data, - coords={ - "Q": q_of_z_wavelength( - wm, - sc.values(p["a"]), - sc.values(p["b"]), - ).data, - }, - masks=corr.masks, - ) - - -def reflectivity_over_q( - da: FootprintCorrectedData[SampleRun], - n: NormalizationFactor, - qbins: QBins, - qres: QResolution, -) -> ReflectivityOverQ: - """ - Normalize the sample measurement by the (ideally calibrated) supermirror. - - Parameters - ---------- - sample: - Sample measurement with coord 'Q' - supermirror: - Supermirror measurement with coord of 'Q' representing the sample 'Q' - - Returns - ------- - : - Reflectivity as a function of Q - """ - reflectivity = da.bin(Q=qbins, dim=da.dims) / sc.values(n.hist(Q=qbins, dim=n.dims)) - reflectivity.coords['Q_resolution'] = qres.data - for coord, value in da.coords.items(): - if ( - not isinstance(value, sc.Variable) - or len(set(value.dims) - set(reflectivity.dims)) == 0 - ): - reflectivity.coords[coord] = value - return ReflectivityOverQ(reflectivity) - - -def reflectivity_per_event( - da: FootprintCorrectedData[SampleRun], - n: IdealReferenceIntensity, - wbins: WavelengthBins, -) -> ReflectivityData: - """ - Weight the sample measurement by the (ideally calibrated) supermirror. 
-
-    Returns:
-        reflectivity "per event"
-    """
-    reflectivity = da.bin(wavelength=wbins, dim=set(da.dims) - set(n.dims)) / sc.values(
-        n
-    )
-    for coord, value in da.coords.items():
-        if (
-            not isinstance(value, sc.Variable)
-            or len(set(value.dims) - set(reflectivity.dims)) == 0
-        ):
-            reflectivity.coords[coord] = value
-    return ReflectivityData(reflectivity)
-
-
-providers = (reflectivity_over_q, normalization_factor, reflectivity_per_event)
diff --git a/src/ess/reflectometry/orso.py b/src/ess/reflectometry/orso.py
index 6244f9b4..e4bdcc2d 100644
--- a/src/ess/reflectometry/orso.py
+++ b/src/ess/reflectometry/orso.py
@@ -17,11 +17,8 @@
 from orsopy.fileio import data_source, orso, reduction
 
 from .load import load_nx
-from .supermirror import SupermirrorReflectivityCorrection
 from .types import (
     Filename,
-    FootprintCorrectedData,
-    ReducibleDetectorData,
     ReferenceRun,
     SampleRun,
 )
@@ -182,9 +179,9 @@ def build_orso_data_source(
 
 
 _CORRECTIONS_BY_GRAPH_KEY = {
-    ReducibleDetectorData[SampleRun]: "chopper ToF correction",
-    FootprintCorrectedData[SampleRun]: "footprint correction",
-    SupermirrorReflectivityCorrection: "supermirror calibration",
+    # ReducibleDetectorData[SampleRun]: "chopper ToF correction",
+    # FootprintCorrectedData[SampleRun]: "footprint correction",
+    # SupermirrorReflectivityCorrection: "supermirror calibration",
 }
 
 
diff --git a/src/ess/reflectometry/supermirror/__init__.py b/src/ess/reflectometry/supermirror/__init__.py
index b1a03792..ac8ad97b 100644
--- a/src/ess/reflectometry/supermirror/__init__.py
+++ b/src/ess/reflectometry/supermirror/__init__.py
@@ -1,4 +1,36 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
 # flake8: noqa: F401
+import scipp as sc
+import numpy as np
+
 from .types import Alpha, CriticalEdge, MValue, SupermirrorReflectivityCorrection
+
+
+def supermirror_reflectivity(q: sc.Variable, c: CriticalEdge, m: MValue, alpha: Alpha):
+    """
+    Returns the reflectivity of the supermirror.
+    For ``q`` outside of the region of known reflectivity
+    this function returns ``nan``.
+
+    Parameters
+    ----------
+    q:
+        Momentum transfer.
+    c:
+        Supermirror critical edge.
+    m:
+        m-value for the supermirror.
+    alpha:
+        Supermirror alpha value.
+
+    Returns
+    -------
+    :
+        Reflectivity of the supermirror at q.
+    """
+    return sc.where(
+        q < c,
+        sc.scalar(1.0),
+        sc.where(q < m * c, sc.scalar(1) - alpha * (q - c), sc.scalar(np.nan)),
+    )
diff --git a/src/ess/reflectometry/tools.py b/src/ess/reflectometry/tools.py
index a5a5739f..40941642 100644
--- a/src/ess/reflectometry/tools.py
+++ b/src/ess/reflectometry/tools.py
@@ -13,46 +13,6 @@
 from ess.reflectometry import orso
 from ess.reflectometry.types import ReflectivityOverQ
 
-_STD_TO_FWHM = sc.scalar(2.0) * sc.sqrt(sc.scalar(2.0) * sc.log(sc.scalar(2.0)))
-
-
-def fwhm_to_std(fwhm: sc.Variable) -> sc.Variable:
-    """
-    Convert from full-width half maximum to standard deviation.
-
-    Parameters
-    ----------
-    fwhm:
-        Full-width half maximum.
-
-    Returns
-    -------
-    :
-        Standard deviation.
-    """
-    # Enables the conversion from full width half
-    # maximum to standard deviation
-    return fwhm / _STD_TO_FWHM
-
-
-def std_to_fwhm(std: sc.Variable) -> sc.Variable:
-    """
-    Convert from standard deviation to full-width half maximum.
-
-    Parameters
-    ----------
-    std:
-        Standard deviation.
-
-    Returns
-    -------
-    :
-        Full-width half maximum.
-    """
-    # Enables the conversion from full width half
-    # maximum to standard deviation
-    return std * _STD_TO_FWHM
-
 
 def linlogspace(
     dim: str,
@@ -168,9 +128,14 @@ def _create_qgrid_where_overlapping(qgrids):
     return sc.concat(pieces, dim='Q')
 
 
+def _same_dtype(arrays):
+    return [arr.to(dtype='float64') for arr in arrays]
+
+
 def _interpolate_on_qgrid(curves, grid):
     return sc.concat(
-        [sc.lookup(c, grid.dim)[sc.midpoints(grid)] for c in curves], dim='curves'
+        _same_dtype([sc.lookup(c, grid.dim)[sc.midpoints(grid)] for c in curves]),
+        dim='curves',
     )
 
 
diff --git a/src/ess/reflectometry/types.py b/src/ess/reflectometry/types.py
index 73c07213..32cae2c1 100644
--- a/src/ess/reflectometry/types.py
+++ b/src/ess/reflectometry/types.py
@@ -20,10 +20,6 @@ class SamplePosition(sciline.Scope[RunType, sc.Variable], sc.Variable):
     """The position of the sample relative to the source(?)."""
 
 
-class IncidentBeam(sciline.Scope[RunType, sc.Variable], sc.Variable):
-    """Incident beam vector."""
-
-
 class SpecularReflectionCoordTransformGraph(sciline.Scope[RunType, dict], dict):
     """Coordinate transformation graph for specular reflection"""
 
@@ -37,40 +33,23 @@ class LoadedNeXusDetector(sciline.Scope[RunType, sc.DataGroup], sc.DataGroup):
     """NXdetector loaded from file"""
 
 
-class ReducibleDetectorData(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
-    """Event time data after correcting tof, ready for reduction"""
-
-
-class DataWithScatteringCoordinates(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
-    """Event data with added coordinates such as incident angle (theta),
-    wavelength, and momentum transfer (Q)"""
-
-
-class MaskedData(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
-    """Event data that has been masked in wavelength and logical detector coordinates"""
+class ReducibleData(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
+    """Event data with common coordinates added"""
 
 
-class FootprintCorrectedData(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
-    """Event data with weights corrected for the footprint of the beam
-    on the sample for the incidence angle of the event."""
-
-
-ReferenceIntensity = NewType("ReferenceIntensity", sc.DataArray)
-"""Intensity distribution of the reference measurement in (z, wavelength)"""
-
-IdealReferenceIntensity = NewType("IdealReferenceIntensity", sc.DataArray)
+ReducedReference = NewType("ReducedReference", sc.DataArray)
 """Intensity distribution on the detector for a sample with :math`R(Q) = 1`"""
 
-NormalizationFactor = NewType("NormalizationFactor", sc.DataArray)
-""":code`IdealReferenceIntensity` with added coordinate "sample"-Q"""
+Reference = NewType("Reference", sc.DataArray)
+""":code:`ReducedReference` histogrammed in sample ``Q``"""
 
 ReflectivityOverQ = NewType("ReflectivityOverQ", sc.DataArray)
 """Intensity histogram over momentum transfer
 normalized by the calibrated reference measurement."""
 
-ReflectivityData = NewType("ReflectivityData", sc.DataArray)
-"""Reflectivity "per event". Event data weighted by the expected
-intensity at the coordinates of the event."""
+ReflectivityOverZW = NewType("ReflectivityOverZW", sc.DataArray)
+"""Intensity histogram over the z- and wavelength-grid,
+normalized by the calibrated reference measurement."""
 
 QResolution = NewType("QResolution", sc.Variable)
 """Resolution term for the momentum transfer for each bin of QBins."""
@@ -111,11 +90,6 @@ class SampleSize(sciline.Scope[RunType, sc.Variable], sc.Variable):
    """Size of the sample.
If None it is assumed to be the same as the reference.""" -Gravity = NewType("Gravity", sc.Variable) -"""This parameter determines if gravity is taken into account -when computing the scattering angle and momentum transfer.""" - - YIndexLimits = NewType("YIndexLimits", tuple[sc.Variable, sc.Variable]) """Limit of the (logical) 'y' detector pixel index""" diff --git a/src/ess/reflectometry/workflow.py b/src/ess/reflectometry/workflow.py index c489b2cb..1af48cfb 100644 --- a/src/ess/reflectometry/workflow.py +++ b/src/ess/reflectometry/workflow.py @@ -14,7 +14,7 @@ ) from ess.reflectometry.types import ( Filename, - FootprintCorrectedData, + ReducibleData, RunType, SampleRotation, SampleRun, @@ -68,9 +68,9 @@ def with_filenames( mapped = wf.map(df) - wf[FootprintCorrectedData[runtype]] = mapped[ - FootprintCorrectedData[runtype] - ].reduce(index=axis_name, func=_concatenate_event_lists) + wf[ReducibleData[runtype]] = mapped[ReducibleData[runtype]].reduce( + index=axis_name, func=_concatenate_event_lists + ) wf[RawChopper[runtype]] = mapped[RawChopper[runtype]].reduce( index=axis_name, func=_any_value ) From 3776e33c046f1afb17911658c7340193cbdb14c9 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 28 Nov 2024 18:08:28 +0100 Subject: [PATCH 02/37] use transform_coords --- src/ess/amor/conversions.py | 36 ++++++++++++++-------------- src/ess/amor/normalization.py | 45 +++++++++++------------------------ 2 files changed, 32 insertions(+), 49 deletions(-) diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 706ed647..75c12599 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -3,11 +3,13 @@ import numpy as np import scipp as sc +from ..reflectometry.conversions import reflectometry_q from ..reflectometry.types import ( BeamDivergenceLimits, RawDetectorData, ReducibleData, RunType, + WavelengthBins, YIndexLimits, ) @@ -58,25 +60,19 @@ def add_common_coords_and_masks( da: RawDetectorData[RunType], ylim: YIndexLimits, bdlim: BeamDivergenceLimits, + wbins: WavelengthBins, ) -> ReducibleData[RunType]: - da.bins.coords["wavelength"] = wavelength( - event_time_offset=da.bins.coords["event_time_offset"], - divergence_angle=da.coords["pixel_divergence_angle"], - L1=da.coords["L1"], - L2=da.coords["L2"], - chopper_phase=da.coords["chopper_phase"], - chopper_frequency=da.coords["chopper_frequency"], - ) - da.bins.coords["angle_of_reflection"] = angle_of_reflection( - wavelength=da.bins.coords["wavelength"], - divergence_angle=da.coords["pixel_divergence_angle"], - L2=da.coords["L2"], - sample_rotation=da.coords["sample_rotation"], - detector_rotation=da.coords["detector_rotation"], - ) - da.bins.coords["angle_of_divergence"] = angle_of_divergence( - angle_of_reflection=da.bins.coords["angle_of_reflection"], - sample_rotation=da.coords["sample_rotation"], + da = da.transform_coords( + ("wavelength", "angle_of_reflection", "angle_of_divergence", "Q"), + { + "divergence_angle": "pixel_divergence_angle", + "wavelength": wavelength, + "angle_of_reflection": angle_of_reflection, + "angle_of_divergence": angle_of_divergence, + "Q": reflectometry_q, + }, + rename_dims=False, + keep_intermediate=False, ) da.masks["stripe_range"] = (da.coords["stripe"] < ylim[0]) | ( da.coords["stripe"] > ylim[1] @@ -88,6 +84,10 @@ def add_common_coords_and_masks( da.bins.coords["angle_of_divergence"] > bdlim[1].to(unit=da.bins.coords["angle_of_divergence"].bins.unit) ) + da.bins.masks['wavelength'] = (wbins[0] > da.bins.coords['wavelength']) | ( + wbins[-1] < 
da.bins.coords['wavelength'] + ) + # Footprint correction da /= sc.sin(da.bins.coords['angle_of_reflection']) return da diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index 02dba9f4..70621136 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -37,10 +37,7 @@ def reduce_reference( alpha: Alpha, ) -> ReducedReference: R = supermirror_reflectivity( - reflectometry_q( - wavelength=reference.bins.coords['wavelength'], - angle_of_reflection=reference.bins.coords['angle_of_reflection'], - ), + reference.bins.coords['Q'], c=critical_edge, m=mvalue, alpha=alpha, @@ -54,17 +51,9 @@ def reduce_sample_over_q( sample: ReducibleData[SampleRun], reference: Reference, qbins: QBins, - wavelength_bins: WavelengthBins, zlims: ZIndexLimits, qresolution: QResolution, ) -> ReflectivityOverQ: - sample.bins.coords['Q'] = reflectometry_q( - sample.bins.coords['wavelength'], - sample.bins.coords['angle_of_reflection'], - ) - sample.bins.masks['wavelength'] = ( - wavelength_bins[0] > sample.bins.coords['wavelength'] - ) | (wavelength_bins[-1] < sample.bins.coords['wavelength']) _add_pre_reduction_masks(sample, zlims) R = sample.bins.concat().bin(Q=qbins) / reference.flatten(to='Q').hist(Q=qbins).data R.coords['Q_resolution'] = qresolution.data @@ -74,18 +63,11 @@ def reduce_sample_over_q( def reduce_sample_over_zw( sample: ReducibleData[SampleRun], reference: Reference, - wavelength_bins: WavelengthBins, zlims: ZIndexLimits, + wbins: WavelengthBins, ) -> ReflectivityOverZW: - sample.bins.coords['Q'] = reflectometry_q( - sample.bins.coords['wavelength'], - sample.bins.coords['angle_of_reflection'], - ) - sample.bins.masks['wavelength'] = ( - wavelength_bins[0] > sample.bins.coords['wavelength'] - ) | (wavelength_bins[-1] < sample.bins.coords['wavelength']) _add_pre_reduction_masks(sample, zlims) - R = sample.bins.concat(('stripe',)).bin(wavelength=wavelength_bins) / reference.data + R = sample.bins.concat(('stripe',)).bin(wavelength=wbins) / reference.data R.masks["too_few_events"] = reference.data < sc.scalar(1, unit="counts") return R @@ -97,16 +79,17 @@ def evaluate_reference( zlims: ZIndexLimits, ) -> Reference: ref = reference.copy() - ref.coords['angle_of_reflection'] = angle_of_reflection( - wavelength=sc.midpoints(ref.coords["wavelength"]), - divergence_angle=ref.coords["pixel_divergence_angle"], - L2=ref.coords["L2"], - sample_rotation=sample.coords["sample_rotation"], - detector_rotation=sample.coords["detector_rotation"], - ) - ref.coords['Q'] = reflectometry_q( - wavelength=sc.midpoints(ref.coords["wavelength"]), - angle_of_reflection=ref.coords["angle_of_reflection"], + ref.coords["sample_rotation"] = sample.coords["sample_rotation"] + ref.coords["detector_rotation"] = sample.coords["detector_rotation"] + ref.coords["wavelength"] = sc.midpoints(ref.coords["wavelength"]) + ref = ref.transform_coords( + ("angle_of_reflection", "Q"), + { + "divergence_angle": "pixel_divergence_angle", + "angle_of_reflection": angle_of_reflection, + "Q": reflectometry_q, + }, + rename_dims=False, ) _add_pre_reduction_masks(ref, zlims) return sc.values(ref) From 320ecd26a097ba372986511403ae65f9760c4160 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Fri, 29 Nov 2024 12:06:28 +0100 Subject: [PATCH 03/37] rename back to theta --- src/ess/amor/conversions.py | 14 ++-- src/ess/amor/figures.py | 108 ++++++++++++--------------- src/ess/amor/normalization.py | 6 +- src/ess/amor/orso.py | 4 +- src/ess/amor/resolution.py | 10 +-- src/ess/amor/utils.py | 18 ++--- 
src/ess/reflectometry/conversions.py | 8 +- 7 files changed, 74 insertions(+), 94 deletions(-) diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 75c12599..d08708bb 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -14,9 +14,7 @@ ) -def angle_of_reflection( - wavelength, divergence_angle, L2, sample_rotation, detector_rotation -): +def theta(wavelength, divergence_angle, L2, sample_rotation, detector_rotation): c = sc.constants.g * sc.constants.m_n**2 / sc.constants.h**2 out = (c * L2 * wavelength**2).to(unit='dimensionless') + sc.sin( divergence_angle + detector_rotation @@ -26,8 +24,8 @@ def angle_of_reflection( return out -def angle_of_divergence(angle_of_reflection, sample_rotation): - return angle_of_reflection - sample_rotation +def angle_of_divergence(theta, sample_rotation): + return theta - sample_rotation def wavelength( @@ -63,11 +61,11 @@ def add_common_coords_and_masks( wbins: WavelengthBins, ) -> ReducibleData[RunType]: da = da.transform_coords( - ("wavelength", "angle_of_reflection", "angle_of_divergence", "Q"), + ("wavelength", "theta", "angle_of_divergence", "Q"), { "divergence_angle": "pixel_divergence_angle", "wavelength": wavelength, - "angle_of_reflection": angle_of_reflection, + "theta": theta, "angle_of_divergence": angle_of_divergence, "Q": reflectometry_q, }, @@ -89,7 +87,7 @@ def add_common_coords_and_masks( ) # Footprint correction - da /= sc.sin(da.bins.coords['angle_of_reflection']) + da /= sc.sin(da.bins.coords['theta']) return da diff --git a/src/ess/amor/figures.py b/src/ess/amor/figures.py index e254de78..5fcd1643 100644 --- a/src/ess/amor/figures.py +++ b/src/ess/amor/figures.py @@ -18,7 +18,7 @@ WavelengthThetaFigure, WavelengthZIndexFigure, ) -from .utils import angle_of_reflection_grid +from .utils import theta_grid def _reshape_array_to_expected_shape(da, dims, **bins): @@ -52,40 +52,39 @@ def _repeat_variable_argument(n, arg): ) -def _try_to_create_angle_of_reflection_grid_if_missing(bins, da): +def _try_to_create_theta_grid_if_missing(bins, da): if ( - 'angle_of_reflection' not in bins - and 'angle_of_reflection' not in da.dims + 'theta' not in bins + and 'theta' not in da.dims and 'sample_rotation' in da.coords and 'detector_rotation' in da.coords ): - bins['angle_of_reflection'] = angle_of_reflection_grid( + bins['theta'] = theta_grid( nu=da.coords['detector_rotation'], mu=da.coords['sample_rotation'] ) -def wavelength_angle_of_reflection_figure( +def wavelength_theta_figure( da: sc.DataArray | Sequence[sc.DataArray], *, wavelength_bins: (sc.Variable | None) | Sequence[sc.Variable | None] = None, - angle_of_reflection_bins: (sc.Variable | None) - | Sequence[sc.Variable | None] = None, + theta_bins: (sc.Variable | None) | Sequence[sc.Variable | None] = None, q_edges_to_display: Sequence[sc.Variable] = (), linewidth: float = 1.0, **kwargs, ): ''' - Creates a figure displaying a histogram over :math:`\\angle_of_reflection` + Creates a figure displaying a histogram over :math:`\\theta` and :math:`\\lambda`. The input can either be a single data array containing the data to display, or a sequence of data arrays. - The inputs must either have coordinates called "angle_of_reflection" + The inputs must either have coordinates called "theta" and "wavelength", or they must be histograms with dimensions - "angle_of_reflection" and "wavelength". + "theta" and "wavelength". 
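
For illustration, a minimal usage sketch of the renamed helper once inputs of the kind just described exist. Here `diagnostics` is a placeholder (not defined in this patch) for a dict mapping run number to the per-run results computed by the workflow, mirroring the call pattern used in the reduction notebook later in this series:

    import scipp as sc

    from ess.amor.figures import wavelength_theta_figure
    from ess.amor.types import ThetaBins
    from ess.reflectometry.types import ReflectivityOverZW, SampleRun

    # `diagnostics` is assumed: run number -> results containing
    # ReflectivityOverZW and ThetaBins[SampleRun] entries.
    wavelength_theta_figure(
        [res[ReflectivityOverZW] for res in diagnostics.values()],
        theta_bins=[res[ThetaBins[SampleRun]] for res in diagnostics.values()],
        q_edges_to_display=(
            sc.scalar(0.018, unit='1/angstrom'),
            sc.scalar(0.113, unit='1/angstrom'),
        ),
    )
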
- If :code:`wavelength_bins` or :code:`angle_of_reflection_bins` are provided, + If :code:`wavelength_bins` or :code:`theta_bins` are provided, they are used to construct the histogram. If not provided, the function uses the bin edges that already exist on the data arrays. @@ -98,8 +97,8 @@ def wavelength_angle_of_reflection_figure( Data arrays to display. wavelength_bins : array-like, optional Bins used to histogram the data in wavelength. - angle_of_reflection_bins : array-like, optional - Bins used to histogram the data in angle_of_reflection. + theta_bins : array-like, optional + Bins used to histogram the data in theta. q_edges_to_display : sequence of float, optional Values of :math:`Q` to be displayed as straight lines in the figure. linewidth : float, optional @@ -114,44 +113,39 @@ def wavelength_angle_of_reflection_figure( ''' if isinstance(da, sc.DataArray): - return wavelength_angle_of_reflection_figure( + return wavelength_theta_figure( (da,), wavelength_bins=(wavelength_bins,), - angle_of_reflection_bins=(angle_of_reflection_bins,), + theta_bins=(theta_bins,), q_edges_to_display=q_edges_to_display, **kwargs, ) - wavelength_bins, angle_of_reflection_bins = ( - _repeat_variable_argument(len(da), arg) - for arg in (wavelength_bins, angle_of_reflection_bins) + wavelength_bins, theta_bins = ( + _repeat_variable_argument(len(da), arg) for arg in (wavelength_bins, theta_bins) ) hs = [] - for d, wavelength_bin, angle_of_reflection_bin in zip( - da, wavelength_bins, angle_of_reflection_bins, strict=True + for d, wavelength_bin, theta_bin in zip( + da, wavelength_bins, theta_bins, strict=True ): bins = {} if wavelength_bin is not None: bins['wavelength'] = wavelength_bin - if angle_of_reflection_bin is not None: - bins['angle_of_reflection'] = angle_of_reflection_bin + if theta_bin is not None: + bins['theta'] = theta_bin - _try_to_create_angle_of_reflection_grid_if_missing(bins, d) + _try_to_create_theta_grid_if_missing(bins, d) - hs.append( - _reshape_array_to_expected_shape( - d, ('angle_of_reflection', 'wavelength'), **bins - ) - ) + hs.append(_reshape_array_to_expected_shape(d, ('theta', 'wavelength'), **bins)) kwargs.setdefault('cbar', True) kwargs.setdefault('norm', 'log') p = pp.imagefigure(*(pp.Node(h) for h in hs), **kwargs) for q in q_edges_to_display: - thmax = max(h.coords["angle_of_reflection"].max() for h in hs) + thmax = max(h.coords["theta"].max() for h in hs) p.ax.plot( [0.0, 4 * np.pi * (sc.sin(thmax) / q).value], [0.0, thmax.value], @@ -163,25 +157,24 @@ def wavelength_angle_of_reflection_figure( return p -def q_angle_of_reflection_figure( +def q_theta_figure( da: sc.DataArray | Sequence[sc.DataArray], *, q_bins: (sc.Variable | None) | Sequence[sc.Variable | None] = None, - angle_of_reflection_bins: (sc.Variable | None) - | Sequence[sc.Variable | None] = None, + theta_bins: (sc.Variable | None) | Sequence[sc.Variable | None] = None, **kwargs, ): ''' - Creates a figure displaying a histogram over :math:`\\angle_of_reflection` + Creates a figure displaying a histogram over :math:`\\theta` and :math:`Q`. The input can either be a single data array containing the data to display, or a sequence of data arrays. - The inputs must either have coordinates called "angle_of_reflection" and "Q", - or they must be histograms with dimensions "angle_of_reflection" and "Q". + The inputs must either have coordinates called "theta" and "Q", + or they must be histograms with dimensions "theta" and "Q". 
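
A companion sketch for the Q/theta variant, under the same assumptions as above (`diagnostics` and `workflow` are placeholders for objects built by the workflow, as in the documentation notebook):

    from ess.amor.figures import q_theta_figure
    from ess.amor.types import ThetaBins
    from ess.reflectometry.types import QBins, ReflectivityOverZW, SampleRun

    # Same assumed `diagnostics` dict and `workflow` pipeline as before.
    q_theta_figure(
        [res[ReflectivityOverZW] for res in diagnostics.values()],
        theta_bins=[res[ThetaBins[SampleRun]] for res in diagnostics.values()],
        q_bins=workflow.compute(QBins),
    )
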
- If :code:`angle_of_reflection_bins` or :code:`q_bins` are provided, they are used + If :code:`theta_bins` or :code:`q_bins` are provided, they are used to construct the histogram. If not provided, the function uses the bin edges that already exist on the data arrays. @@ -191,8 +184,8 @@ def q_angle_of_reflection_figure( Data arrays to display. q_bins : array-like, optional Bins used to histogram the data in Q. - angle_of_reflection_bins : array-like, optional - Bins used to histogram the data in angle_of_reflection. + theta_bins : array-like, optional + Bins used to histogram the data in theta. Returns ------- @@ -200,35 +193,30 @@ def q_angle_of_reflection_figure( ''' if isinstance(da, sc.DataArray): - return q_angle_of_reflection_figure( + return q_theta_figure( (da,), q_bins=(q_bins,), - angle_of_reflection_bins=(angle_of_reflection_bins,), + theta_bins=(theta_bins,), **kwargs, ) - q_bins, angle_of_reflection_bins = ( - _repeat_variable_argument(len(da), arg) - for arg in (q_bins, angle_of_reflection_bins) + q_bins, theta_bins = ( + _repeat_variable_argument(len(da), arg) for arg in (q_bins, theta_bins) ) hs = [] - for d, q_bin, angle_of_reflection_bin in zip( - da, q_bins, angle_of_reflection_bins, strict=True - ): + for d, q_bin, theta_bin in zip(da, q_bins, theta_bins, strict=True): bins = {} if q_bin is not None: bins['Q'] = q_bin - if angle_of_reflection_bin is not None: - bins['angle_of_reflection'] = angle_of_reflection_bin + if theta_bin is not None: + bins['theta'] = theta_bin - _try_to_create_angle_of_reflection_grid_if_missing(bins, d) + _try_to_create_theta_grid_if_missing(bins, d) - hs.append( - _reshape_array_to_expected_shape(d, ('angle_of_reflection', 'Q'), **bins) - ) + hs.append(_reshape_array_to_expected_shape(d, ('theta', 'Q'), **bins)) kwargs.setdefault('cbar', True) kwargs.setdefault('norm', 'log') @@ -290,21 +278,19 @@ def wavelength_z_figure( return pp.imagefigure(*(pp.Node(h) for h in hs), **kwargs) -def wavelength_angle_of_reflection_diagnostic_figure( +def wavelength_theta_diagnostic_figure( da: ReflectivityOverZW, thbins: ThetaBins[SampleRun], ) -> WavelengthThetaFigure: - return wavelength_angle_of_reflection_figure(da, angle_of_reflection_bins=thbins) + return wavelength_theta_figure(da, theta_bins=thbins) -def q_angle_of_reflection_diagnostic_figure( +def q_theta_diagnostic_figure( da: ReflectivityOverZW, thbins: ThetaBins[SampleRun], qbins: QBins, ) -> QThetaFigure: - return q_angle_of_reflection_figure( - da, q_bins=qbins, angle_of_reflection_bins=thbins - ) + return q_theta_figure(da, q_bins=qbins, theta_bins=thbins) def wavelength_z_diagnostic_figure( @@ -325,7 +311,7 @@ def diagnostic_view( providers = ( wavelength_z_diagnostic_figure, - wavelength_angle_of_reflection_diagnostic_figure, - q_angle_of_reflection_diagnostic_figure, + wavelength_theta_diagnostic_figure, + q_theta_diagnostic_figure, diagnostic_view, ) diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index 70621136..8dfefbd2 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -20,7 +20,7 @@ WavelengthBins, ZIndexLimits, ) -from .conversions import angle_of_reflection +from .conversions import theta def _add_pre_reduction_masks(da, zindex_limits): @@ -83,10 +83,10 @@ def evaluate_reference( ref.coords["detector_rotation"] = sample.coords["detector_rotation"] ref.coords["wavelength"] = sc.midpoints(ref.coords["wavelength"]) ref = ref.transform_coords( - ("angle_of_reflection", "Q"), + ("theta", "Q"), { "divergence_angle": 
"pixel_divergence_angle", - "angle_of_reflection": angle_of_reflection, + "theta": theta, "Q": reflectometry_q, }, rename_dims=False, diff --git a/src/ess/amor/orso.py b/src/ess/amor/orso.py index 038d8283..970f2a09 100644 --- a/src/ess/amor/orso.py +++ b/src/ess/amor/orso.py @@ -26,9 +26,7 @@ def build_orso_instrument(events: ReflectivityOverQ) -> OrsoInstrument: return OrsoInstrument( orso_data_source.InstrumentSettings( wavelength=orso_base.ValueRange(*_limits_of_coord(events, "wavelength")), - incident_angle=orso_base.ValueRange( - *_limits_of_coord(events, "angle_of_reflection") - ), + incident_angle=orso_base.ValueRange(*_limits_of_coord(events, "theta")), polarization=None, # TODO how can we determine this from the inputs? ) ) diff --git a/src/ess/amor/resolution.py b/src/ess/amor/resolution.py index a1debe1a..23255a8c 100644 --- a/src/ess/amor/resolution.py +++ b/src/ess/amor/resolution.py @@ -105,7 +105,7 @@ def _sample_size_resolution( def angular_resolution( - angle_of_reflection, + theta, L2, detector_spatial_resolution, ): @@ -115,7 +115,7 @@ def angular_resolution( Parameters ---------- - angle_of_reflection: + theta: Angle of reflection. L2: Distance between sample and detector. @@ -133,8 +133,8 @@ def angular_resolution( detector_spatial_resolution / L2.to(unit=detector_spatial_resolution.unit) ) - ).to(unit=angle_of_reflection.unit) - / angle_of_reflection + ).to(unit=theta.unit) + / theta ) @@ -142,7 +142,7 @@ def _angular_resolution( da: Reference, detector_spatial_resolution: DetectorSpatialResolution[SampleRun] ) -> AngularResolution: return angular_resolution( - angle_of_reflection=da.coords['angle_of_reflection'], + theta=da.coords['theta'], L2=da.coords['L2'], detector_spatial_resolution=detector_spatial_resolution, ) diff --git a/src/ess/amor/utils.py b/src/ess/amor/utils.py index a05accb2..4b08e755 100644 --- a/src/ess/amor/utils.py +++ b/src/ess/amor/utils.py @@ -10,20 +10,20 @@ from .types import ThetaBins -def angle_of_reflection_grid( +def theta_grid( nu: DetectorRotation[RunType], mu: SampleRotation[RunType] ) -> ThetaBins[RunType]: """Special grid used to create intensity maps over - (angle_of_reflection, wavelength). + (theta, wavelength). 
The grid avoids aliasing artifacts that occur if the - angle_of_reflection bins overlap the blade edges.""" + theta bins overlap the blade edges.""" # angular offset of two blades: bladeAngle = 2.0 * sc.asin(0.5 * Detector.bladeZ / Detector.distance) # associate an angle with each z-coordinate on one blade blade_grid = sc.atan( - sc.arange("angle_of_reflection", 0, 33) + sc.arange("theta", 0, 33) * Detector.dZ - / (Detector.distance + sc.arange("angle_of_reflection", 0, 33) * Detector.dX) + / (Detector.distance + sc.arange("theta", 0, 33) * Detector.dX) ) # approximate angular step width on one blade stepWidth = blade_grid[1] - blade_grid[0] @@ -31,7 +31,7 @@ def angle_of_reflection_grid( blade_grid = blade_grid - 0.2 * stepWidth delta_grid = sc.array( - dims=["angle_of_reflection"], + dims=["theta"], values=[], unit=blade_grid.unit, dtype=blade_grid.dtype, @@ -39,14 +39,14 @@ def angle_of_reflection_grid( # loop over all blades but one: for _ in range(Detector.nBlades.value - 1): # append the actual blade's grid to the array of detector-local angles - delta_grid = sc.concat((delta_grid, blade_grid), "angle_of_reflection") + delta_grid = sc.concat((delta_grid, blade_grid), "theta") # shift the blade grid by the angular offset blade_grid = blade_grid + bladeAngle # remove all entries in the detector local grid which are above the # expected next value (plus some space to avoid very thin bins) delta_grid = delta_grid[delta_grid < blade_grid[0] - 0.5 * stepWidth] # append the grid of the last blade. - delta_grid = sc.concat((delta_grid, blade_grid), "angle_of_reflection") + delta_grid = sc.concat((delta_grid, blade_grid), "theta") # add angular position of the detector grid = ( @@ -60,4 +60,4 @@ def angle_of_reflection_grid( return grid -providers = (angle_of_reflection_grid,) +providers = (theta_grid,) diff --git a/src/ess/reflectometry/conversions.py b/src/ess/reflectometry/conversions.py index 6aef0278..8e5dca25 100644 --- a/src/ess/reflectometry/conversions.py +++ b/src/ess/reflectometry/conversions.py @@ -5,9 +5,7 @@ from scippneutron._utils import elem_dtype -def reflectometry_q( - wavelength: sc.Variable, angle_of_reflection: sc.Variable -) -> sc.Variable: +def reflectometry_q(wavelength: sc.Variable, theta: sc.Variable) -> sc.Variable: """ Compute momentum transfer from reflection angle. @@ -15,7 +13,7 @@ def reflectometry_q( ---------- wavelength: Wavelength values for the events. - angle_of_reflection: + theta: Angle of reflection for the events. 
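
For reference, the quantity this helper evaluates is the specular momentum transfer

    Q = 4 * pi * sin(theta) / wavelength

computed, as the implementation just below shows, in the dtype of the wavelength coordinate so that single-precision event data is not silently upcast.
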
Returns @@ -25,7 +23,7 @@ def reflectometry_q( """ dtype = elem_dtype(wavelength) c = (4 * pi).astype(dtype) - return c * sc.sin(angle_of_reflection.astype(dtype, copy=False)) / wavelength + return c * sc.sin(theta.astype(dtype, copy=False)) / wavelength providers = () From 47dd79f30bd8308599a379a5d4c1e149d922f32e Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Fri, 29 Nov 2024 13:26:51 +0100 Subject: [PATCH 04/37] fix: re-introduce footprint correction --- src/ess/amor/conversions.py | 13 +++++++++--- src/ess/amor/resolution.py | 22 +------------------- src/ess/reflectometry/corrections.py | 31 ++++++++++++++++++++++++++++ src/ess/reflectometry/tools.py | 21 +++++++++++++++++++ 4 files changed, 63 insertions(+), 24 deletions(-) create mode 100644 src/ess/reflectometry/corrections.py diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index d08708bb..37f53ef9 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -4,11 +4,14 @@ import scipp as sc from ..reflectometry.conversions import reflectometry_q +from ..reflectometry.corrections import footprint_on_sample from ..reflectometry.types import ( BeamDivergenceLimits, + BeamSize, RawDetectorData, ReducibleData, RunType, + SampleSize, WavelengthBins, YIndexLimits, ) @@ -59,6 +62,8 @@ def add_common_coords_and_masks( ylim: YIndexLimits, bdlim: BeamDivergenceLimits, wbins: WavelengthBins, + beam_size: BeamSize[RunType], + sample_size: SampleSize[RunType], ) -> ReducibleData[RunType]: da = da.transform_coords( ("wavelength", "theta", "angle_of_divergence", "Q"), @@ -85,9 +90,11 @@ def add_common_coords_and_masks( da.bins.masks['wavelength'] = (wbins[0] > da.bins.coords['wavelength']) | ( wbins[-1] < da.bins.coords['wavelength'] ) - - # Footprint correction - da /= sc.sin(da.bins.coords['theta']) + da /= footprint_on_sample( + da.bins.coords['theta'], + beam_size, + sample_size, + ) return da diff --git a/src/ess/amor/resolution.py b/src/ess/amor/resolution.py index 23255a8c..34be1106 100644 --- a/src/ess/amor/resolution.py +++ b/src/ess/amor/resolution.py @@ -2,6 +2,7 @@ # Copyright (c) 2023 Scipp contributors (https://github.com/scipp) import scipp as sc +from ..reflectometry.tools import fwhm_to_std from ..reflectometry.types import ( DetectorSpatialResolution, QBins, @@ -17,27 +18,6 @@ WavelengthResolution, ) -_STD_TO_FWHM = sc.scalar(2.0) * sc.sqrt(sc.scalar(2.0) * sc.log(sc.scalar(2.0))) - - -def fwhm_to_std(fwhm: sc.Variable) -> sc.Variable: - """ - Convert from full-width half maximum to standard deviation. - - Parameters - ---------- - fwhm: - Full-width half maximum. - - Returns - ------- - : - Standard deviation. - """ - # Enables the conversion from full width half - # maximum to standard deviation - return fwhm / _STD_TO_FWHM - def wavelength_resolution( L1, diff --git a/src/ess/reflectometry/corrections.py b/src/ess/reflectometry/corrections.py new file mode 100644 index 00000000..967cb2de --- /dev/null +++ b/src/ess/reflectometry/corrections.py @@ -0,0 +1,31 @@ +import scipp as sc + +from .tools import fwhm_to_std + + +def footprint_on_sample( + theta: sc.Variable, + beam_size: sc.Variable, + sample_size: sc.Variable, +) -> sc.Variable: + """ + Computes the fraction of the beam hitting the sample. + Depends on :math:`\\theta`. + + Parameters + ---------- + theta: + Incidence angle relative to sample surface. + beam_size: + Full width half maximum of the beam. + sample_size: + Size of the sample. + TODO: check what sample size actually means. Is it the sample diameter? etc. 
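
Written out, the fraction returned below is

    erf( sample_size * sin(theta) / (beam_size * 2 * sqrt(2 * ln(2))) )

that is, the Gaussian beam profile of full width half maximum `beam_size` is projected onto the sample plane at grazing angle theta and integrated over the sample extent. This is a reading of the code that follows, not an independent derivation.
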
+ + Returns + ------- + : + Fraction of beam hitting the sample. + """ + size_of_beam_on_sample = beam_size / sc.sin(theta) + return sc.erf(fwhm_to_std(sample_size / size_of_beam_on_sample)) diff --git a/src/ess/reflectometry/tools.py b/src/ess/reflectometry/tools.py index 40941642..99cf7e52 100644 --- a/src/ess/reflectometry/tools.py +++ b/src/ess/reflectometry/tools.py @@ -13,6 +13,27 @@ from ess.reflectometry import orso from ess.reflectometry.types import ReflectivityOverQ +_STD_TO_FWHM = sc.scalar(2.0) * sc.sqrt(sc.scalar(2.0) * sc.log(sc.scalar(2.0))) + + +def fwhm_to_std(fwhm: sc.Variable) -> sc.Variable: + """ + Convert from full-width half maximum to standard deviation. + + Parameters + ---------- + fwhm: + Full-width half maximum. + + Returns + ------- + : + Standard deviation. + """ + # Enables the conversion from full width half + # maximum to standard deviation + return fwhm / _STD_TO_FWHM + def linlogspace( dim: str, From 74d6b662acc79764ef7f41e4f0a16af753a8e83a Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Fri, 29 Nov 2024 14:46:20 +0100 Subject: [PATCH 05/37] docs: clarify meaning of argument --- src/ess/reflectometry/corrections.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/ess/reflectometry/corrections.py b/src/ess/reflectometry/corrections.py index 967cb2de..baf91de9 100644 --- a/src/ess/reflectometry/corrections.py +++ b/src/ess/reflectometry/corrections.py @@ -19,8 +19,7 @@ def footprint_on_sample( beam_size: Full width half maximum of the beam. sample_size: - Size of the sample. - TODO: check what sample size actually means. Is it the sample diameter? etc. + Size of the sample, width in the beam direction. Returns ------- From 0e17ad5ecd89b7fcb604e35bb72dfd1b2a88d8d3 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Fri, 29 Nov 2024 14:55:39 +0100 Subject: [PATCH 06/37] rename --- src/ess/amor/load.py | 4 +++- src/ess/amor/normalization.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index 4fd80e75..13795474 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -48,7 +48,9 @@ def load_events( for name, coord in detector_numbers.coords.items(): data.coords[name] = coord - data.coords['iz'] = Detector.nWires * data.coords['blade'] + data.coords['wire'] + data.coords['z_index'] = ( + Detector.nWires * data.coords['blade'] + data.coords['wire'] + ) if data.bins.constituents["data"].data.variances is None: data.bins.constituents["data"].data.variances = data.bins.constituents[ diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index 8dfefbd2..1a237b1c 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -24,8 +24,8 @@ def _add_pre_reduction_masks(da, zindex_limits): - da.masks['z_range'] = (da.coords["iz"] < zindex_limits[0]) | ( - da.coords["iz"] > zindex_limits[1] + da.masks['z_range'] = (da.coords["z_index"] < zindex_limits[0]) | ( + da.coords["z_index"] > zindex_limits[1] ) From 5f9c989d1c06ccfa72a42804cd2019f412730281 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Fri, 29 Nov 2024 14:58:13 +0100 Subject: [PATCH 07/37] update notebook --- docs/user-guide/amor/amor-reduction.ipynb | 35 +++++++++++++++-------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/docs/user-guide/amor/amor-reduction.ipynb b/docs/user-guide/amor/amor-reduction.ipynb index 54461ab1..09cabeb9 100644 --- a/docs/user-guide/amor/amor-reduction.ipynb +++ b/docs/user-guide/amor/amor-reduction.ipynb @@ -76,13 
+76,14 @@ "workflow[ChopperPhase[SampleRun]] = sc.scalar(-7.5, unit='deg')\n", "\n", "workflow[QBins] = sc.geomspace(dim='Q', start=0.005, stop=0.3, num=391, unit='1/angstrom')\n", - "workflow[WavelengthBins] = sc.geomspace('wavelength', 2.8, 12, 301, unit='angstrom')\n", + "workflow[WavelengthBins] = sc.geomspace('wavelength', 2.8, 12.5, 301, unit='angstrom')\n", "\n", "# The YIndexLimits and ZIndexLimits define ranges on the detector where\n", "# data is considered to be valid signal.\n", "# They represent the lower and upper boundaries of a range of pixel indices.\n", - "workflow[YIndexLimits] = sc.scalar(11, unit=None), sc.scalar(41, unit=None)\n", - "workflow[ZIndexLimits] = sc.scalar(80, unit=None), sc.scalar(370, unit=None)" + "workflow[YIndexLimits] = sc.scalar(11), sc.scalar(41)\n", + "workflow[ZIndexLimits] = sc.scalar(80), sc.scalar(370)\n", + "workflow[BeamDivergenceLimits] = sc.scalar(-0.75, unit='deg'), sc.scalar(0.75, unit='deg')" ] }, { @@ -116,9 +117,9 @@ "# The sample rotation value in the file is slightly off, so we set it manually\n", "workflow[SampleRotation[ReferenceRun]] = sc.scalar(0.65, unit='deg')\n", "\n", - "reference_result = workflow.compute(IdealReferenceIntensity)\n", + "reference_result = workflow.compute(ReducedReference)\n", "# Set the result back onto the pipeline to cache it\n", - "workflow[IdealReferenceIntensity] = reference_result" + "workflow[ReducedReference] = reference_result" ] }, { @@ -198,6 +199,7 @@ " },\n", "}\n", "\n", + "\n", "reflectivity = {}\n", "for run_number, params in runs.items():\n", " workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n", @@ -282,11 +284,20 @@ "for run_number, params in runs.items():\n", " workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n", " workflow[SampleRotation[SampleRun]] = params[SampleRotation[SampleRun]]\n", - " diagnostics[run_number] = workflow.compute((ReflectivityData, ThetaBins[SampleRun]))\n", + " diagnostics[run_number] = workflow.compute((ReflectivityOverZW, ThetaBins[SampleRun]))\n", "\n", "# Scale the results using the scale factors computed earlier\n", - "for run_number, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n", - " diagnostics[run_number][ReflectivityData] *= scale_factor" + "#for run_number, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n", + "# diagnostics[run_number][ReflectivityOverZW] *= scale_factor" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "diagnostics['608'][ReflectivityOverZW].hist().flatten(('blade', 'wire'), to='z').plot(norm='log')" ] }, { @@ -306,7 +317,7 @@ "from ess.amor.figures import wavelength_theta_figure\n", "\n", "wavelength_theta_figure(\n", - " [result[ReflectivityData] for result in diagnostics.values()],\n", + " [result[ReflectivityOverZW] for result in diagnostics.values()],\n", " theta_bins=[result[ThetaBins[SampleRun]] for result in diagnostics.values()],\n", " q_edges_to_display=(sc.scalar(0.018, unit='1/angstrom'), sc.scalar(0.113, unit='1/angstrom'))\n", ")" @@ -336,7 +347,7 @@ "from ess.amor.figures import q_theta_figure\n", "\n", "q_theta_figure(\n", - " [res[ReflectivityData] for res in diagnostics.values()],\n", + " [res[ReflectivityOverZW] for res in diagnostics.values()],\n", " theta_bins=[res[ThetaBins[SampleRun]] for res in diagnostics.values()],\n", " q_bins=workflow.compute(QBins)\n", ")" @@ -364,11 +375,11 @@ "workflow[Filename[SampleRun]] = runs['608'][Filename[SampleRun]]\n", 
"workflow[SampleRotation[SampleRun]] = runs['608'][SampleRotation[SampleRun]]\n", "wavelength_z_figure(\n", - " workflow.compute(FootprintCorrectedData[SampleRun]),\n", + " workflow.compute(ReducibleData[SampleRun]),\n", " wavelength_bins=workflow.compute(WavelengthBins),\n", " grid=False\n", ") + wavelength_z_figure(\n", - " reference_result,\n", + " workflow.compute(Reference),\n", " grid=False\n", ")" ] From 840dbab83ea77b528db1dbc0bedb86ff9d3f95a2 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Mon, 2 Dec 2024 13:41:15 +0100 Subject: [PATCH 08/37] fix: move resolution aggregation to normalization step --- src/ess/amor/__init__.py | 1 - src/ess/amor/conversions.py | 9 ++-- src/ess/amor/load.py | 5 ++ src/ess/amor/normalization.py | 75 ++++++++++++++++++++++------- src/ess/amor/resolution.py | 86 ++++++---------------------------- src/ess/reflectometry/types.py | 3 ++ 6 files changed, 86 insertions(+), 93 deletions(-) diff --git a/src/ess/amor/__init__.py b/src/ess/amor/__init__.py index e800d889..cfa95309 100644 --- a/src/ess/amor/__init__.py +++ b/src/ess/amor/__init__.py @@ -41,7 +41,6 @@ *load.providers, *conversions.providers, *normalization.providers, - *resolution.providers, *utils.providers, *figures.providers, *orso.providers, diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 37f53ef9..91432c6e 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -11,9 +11,9 @@ RawDetectorData, ReducibleData, RunType, - SampleSize, WavelengthBins, YIndexLimits, + ZIndexLimits, ) @@ -60,10 +60,10 @@ def wavelength( def add_common_coords_and_masks( da: RawDetectorData[RunType], ylim: YIndexLimits, + zlims: ZIndexLimits, bdlim: BeamDivergenceLimits, wbins: WavelengthBins, beam_size: BeamSize[RunType], - sample_size: SampleSize[RunType], ) -> ReducibleData[RunType]: da = da.transform_coords( ("wavelength", "theta", "angle_of_divergence", "Q"), @@ -80,6 +80,9 @@ def add_common_coords_and_masks( da.masks["stripe_range"] = (da.coords["stripe"] < ylim[0]) | ( da.coords["stripe"] > ylim[1] ) + da.masks['z_range'] = (da.coords["z_index"] < zlims[0]) | ( + da.coords["z_index"] > zlims[1] + ) da.bins.masks["divergence_too_large"] = ( da.bins.coords["angle_of_divergence"] < bdlim[0].to(unit=da.bins.coords["angle_of_divergence"].bins.unit) @@ -93,7 +96,7 @@ def add_common_coords_and_masks( da /= footprint_on_sample( da.bins.coords['theta'], beam_size, - sample_size, + da.coords['sample_size'], ) return da diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index 13795474..0e8feb8c 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -13,6 +13,7 @@ RawDetectorData, RunType, SampleRotation, + SampleSize, ) from .geometry import Detector, pixel_coordinates_in_detector_system from .types import ( @@ -37,6 +38,8 @@ def load_events( chopper_phase: ChopperPhase[RunType], chopper_frequency: ChopperFrequency[RunType], chopper_distance: ChopperDistance[RunType], + chopper_separation: ChopperSeparation[RunType], + sample_size: SampleSize[RunType], ) -> RawDetectorData[RunType]: detector_numbers = pixel_coordinates_in_detector_system() data = ( @@ -61,8 +64,10 @@ def load_events( data.coords["detector_rotation"] = detector_rotation.to(unit='rad') data.coords["chopper_phase"] = chopper_phase data.coords["chopper_frequency"] = chopper_frequency + data.coords["chopper_separation"] = sc.abs(chopper_separation) data.coords["L1"] = sc.abs(chopper_distance) data.coords["L2"] = data.coords['distance_in_detector'] + Detector.distance + 
data.coords["sample_size"] = sample_size return RawDetectorData[RunType](data) diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index 1a237b1c..66764846 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -8,25 +8,51 @@ supermirror_reflectivity, ) from ..reflectometry.types import ( + DetectorSpatialResolution, QBins, - QResolution, ReducedReference, ReducibleData, Reference, ReferenceRun, ReflectivityOverQ, ReflectivityOverZW, + Sample, SampleRun, WavelengthBins, - ZIndexLimits, ) from .conversions import theta +from .resolution import ( + angular_resolution, + q_resolution, + sample_size_resolution, + wavelength_resolution, +) -def _add_pre_reduction_masks(da, zindex_limits): - da.masks['z_range'] = (da.coords["z_index"] < zindex_limits[0]) | ( - da.coords["z_index"] > zindex_limits[1] +def mask_events_where_supermirror_does_not_cover( + sam: ReducibleData[SampleRun], + ref: ReducedReference, + critical_edge: CriticalEdge, + mvalue: MValue, + alpha: Alpha, +) -> Sample: + R = supermirror_reflectivity( + reflectometry_q( + sam.bins.coords["wavelength"], + theta( + sam.bins.coords["wavelength"], + sam.coords["pixel_divergence_angle"], + sam.coords["L2"], + ref.coords["sample_rotation"], + ref.coords["detector_rotation"], + ), + ), + c=critical_edge, + m=mvalue, + alpha=alpha, ) + sam.bins.masks["supermirror_does_not_cover"] = sc.isnan(R) + return sam def reduce_reference( @@ -48,25 +74,28 @@ def reduce_reference( def reduce_sample_over_q( - sample: ReducibleData[SampleRun], + sample: Sample, reference: Reference, qbins: QBins, - zlims: ZIndexLimits, - qresolution: QResolution, ) -> ReflectivityOverQ: - _add_pre_reduction_masks(sample, zlims) - R = sample.bins.concat().bin(Q=qbins) / reference.flatten(to='Q').hist(Q=qbins).data - R.coords['Q_resolution'] = qresolution.data + h = reference.flatten(to='Q').hist(Q=qbins) + R = sample.bins.concat().bin(Q=qbins) / h.data + R.coords['Q_resolution'] = sc.sqrt( + ( + (reference * reference.coords['Q_resolution'] ** 2) + .flatten(to='Q') + .hist(Q=qbins) + ) + / h + ).data return R def reduce_sample_over_zw( - sample: ReducibleData[SampleRun], + sample: Sample, reference: Reference, - zlims: ZIndexLimits, wbins: WavelengthBins, ) -> ReflectivityOverZW: - _add_pre_reduction_masks(sample, zlims) R = sample.bins.concat(('stripe',)).bin(wavelength=wbins) / reference.data R.masks["too_few_events"] = reference.data < sc.scalar(1, unit="counts") return R @@ -76,22 +105,33 @@ def evaluate_reference( reference: ReducedReference, sample: ReducibleData[SampleRun], qbins: QBins, - zlims: ZIndexLimits, + detector_spatial_resolution: DetectorSpatialResolution[SampleRun], ) -> Reference: ref = reference.copy() ref.coords["sample_rotation"] = sample.coords["sample_rotation"] ref.coords["detector_rotation"] = sample.coords["detector_rotation"] + ref.coords["sample_size"] = sample.coords["sample_size"] + ref.coords["detector_spatial_resolution"] = detector_spatial_resolution ref.coords["wavelength"] = sc.midpoints(ref.coords["wavelength"]) ref = ref.transform_coords( - ("theta", "Q"), + ( + "Q", + "wavelength_resolution", + "sample_size_resolution", + "angular_resolution", + "Q_resolution", + ), { "divergence_angle": "pixel_divergence_angle", "theta": theta, "Q": reflectometry_q, + "wavelength_resolution": wavelength_resolution, + "sample_size_resolution": sample_size_resolution, + "angular_resolution": angular_resolution, + "Q_resolution": q_resolution, }, rename_dims=False, ) - _add_pre_reduction_masks(ref, 
zlims) return sc.values(ref) @@ -100,4 +140,5 @@ def evaluate_reference( reduce_sample_over_q, reduce_sample_over_zw, evaluate_reference, + mask_events_where_supermirror_does_not_cover, ) diff --git a/src/ess/amor/resolution.py b/src/ess/amor/resolution.py index 34be1106..c2fbf546 100644 --- a/src/ess/amor/resolution.py +++ b/src/ess/amor/resolution.py @@ -3,20 +3,6 @@ import scipp as sc from ..reflectometry.tools import fwhm_to_std -from ..reflectometry.types import ( - DetectorSpatialResolution, - QBins, - QResolution, - Reference, - SampleRun, - SampleSize, -) -from .types import ( - AngularResolution, - ChopperSeparation, - SampleSizeResolution, - WavelengthResolution, -) def wavelength_resolution( @@ -41,19 +27,11 @@ def wavelength_resolution( Returns ------- : - The angular resolution variable, as standard deviation. + The wavelength resolution variable, as standard deviation. """ return fwhm_to_std(chopper_separation / (L1 + L2)) -def _wavelength_resolution( - da: Reference, chopper_separation: ChopperSeparation[SampleRun] -) -> WavelengthResolution: - return wavelength_resolution( - L1=da.coords['L1'], L2=da.coords['L2'], chopper_separation=chopper_separation - ) - - def sample_size_resolution( L2, sample_size, @@ -78,12 +56,6 @@ def sample_size_resolution( return fwhm_to_std(sample_size / L2.to(unit=sample_size.unit)) -def _sample_size_resolution( - da: Reference, sample_size: SampleSize[SampleRun] -) -> SampleSizeResolution: - return sample_size_resolution(L2=da.coords['L2'], sample_size=sample_size) - - def angular_resolution( theta, L2, @@ -118,62 +90,32 @@ def angular_resolution( ) -def _angular_resolution( - da: Reference, detector_spatial_resolution: DetectorSpatialResolution[SampleRun] -) -> AngularResolution: - return angular_resolution( - theta=da.coords['theta'], - L2=da.coords['L2'], - detector_spatial_resolution=detector_spatial_resolution, - ) - - -def sigma_Q( - ref: Reference, - angular_resolution: AngularResolution, - wavelength_resolution: WavelengthResolution, - sample_size_resolution: SampleSizeResolution, - qbins: QBins, -) -> QResolution: +def q_resolution( + Q, + angular_resolution, + wavelength_resolution, + sample_size_resolution, +): """ - Combine all of the components of the resolution and add Q contribution. + Compute resolution in Q. Parameters ---------- + Q: + Momentum transfer. angular_resolution: Angular resolution contribution. wavelength_resolution: Wavelength resolution contribution. sample_size_resolution: Sample size resolution contribution. - q_bins: - Q-bin values. Returns ------- : - Combined resolution function. + Q resolution function. 
""" - h = ref.flatten(to='Q').hist(Q=qbins) - s = ( - ( - ref - * ( - angular_resolution**2 - + wavelength_resolution**2 - + sample_size_resolution**2 - ) - * ref.coords['Q'] ** 2 - ) - .flatten(to='Q') - .hist(Q=qbins) + return sc.sqrt( + (angular_resolution**2 + wavelength_resolution**2 + sample_size_resolution**2) + * Q**2 ) - return sc.sqrt(sc.values(s / h)) - - -providers = ( - sigma_Q, - _angular_resolution, - _wavelength_resolution, - _sample_size_resolution, -) diff --git a/src/ess/reflectometry/types.py b/src/ess/reflectometry/types.py index 32cae2c1..946858ab 100644 --- a/src/ess/reflectometry/types.py +++ b/src/ess/reflectometry/types.py @@ -43,6 +43,9 @@ class ReducibleData(sciline.Scope[RunType, sc.DataArray], sc.DataArray): Reference = NewType("NormalizationFactor", sc.DataArray) """:code`ReducedReference` histogrammed in sample ``Q``""" +Sample = NewType("Sample", sc.DataArray) +""":code`Sample` measurement prepared for reduction""" + ReflectivityOverQ = NewType("ReflectivityOverQ", sc.DataArray) """Intensity histogram over momentum transfer normalized by the calibrated reference measurement.""" From a41415592341f9ef4c1dd85adb75fd39f899cdd9 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Mon, 2 Dec 2024 14:58:51 +0100 Subject: [PATCH 09/37] fix: get resolution from ioq --- src/ess/amor/orso.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/ess/amor/orso.py b/src/ess/amor/orso.py index 970f2a09..04ef463e 100644 --- a/src/ess/amor/orso.py +++ b/src/ess/amor/orso.py @@ -14,7 +14,7 @@ OrsoIofQDataset, OrsoReduction, ) -from ..reflectometry.types import QResolution, ReflectivityOverQ +from ..reflectometry.types import ReflectivityOverQ def build_orso_instrument(events: ReflectivityOverQ) -> OrsoInstrument: @@ -34,7 +34,6 @@ def build_orso_instrument(events: ReflectivityOverQ) -> OrsoInstrument: def build_orso_iofq_dataset( iofq: ReflectivityOverQ, - sigma_q: QResolution, data_source: OrsoDataSource, reduction: OrsoReduction, ) -> OrsoIofQDataset: @@ -60,7 +59,7 @@ def build_orso_iofq_dataset( qz = sc.midpoints(qz) r = sc.values(iofq.data) sr = sc.stddevs(iofq.data) - sqz = sigma_q.to(unit="1/angstrom", copy=False) + sqz = iofq.coords["Q_resolution"].to(unit="1/angstrom", copy=False) data = np.column_stack(tuple(map(_extract_values_array, (qz, r, sr, sqz)))) data = data[np.isfinite(data).all(axis=-1)] From 8fafbe33411ae1d971769391c872b21179f660de Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Mon, 2 Dec 2024 15:10:56 +0100 Subject: [PATCH 10/37] fix: set list of corrections manually --- src/ess/amor/orso.py | 8 +++++- src/ess/reflectometry/orso.py | 49 +--------------------------------- src/ess/reflectometry/tools.py | 3 --- 3 files changed, 8 insertions(+), 52 deletions(-) diff --git a/src/ess/amor/orso.py b/src/ess/amor/orso.py index 04ef463e..b614539a 100644 --- a/src/ess/amor/orso.py +++ b/src/ess/amor/orso.py @@ -63,7 +63,13 @@ def build_orso_iofq_dataset( data = np.column_stack(tuple(map(_extract_values_array, (qz, r, sr, sqz)))) data = data[np.isfinite(data).all(axis=-1)] - return OrsoIofQDataset(OrsoDataset(header, data)) + ds = OrsoIofQDataset(OrsoDataset(header, data)) + ds.info.reduction.corrections = [ + "chopper ToF correction", + "footprint correction", + "supermirror calibration", + ] + return ds def _extract_values_array(var: sc.Variable) -> np.ndarray: diff --git a/src/ess/reflectometry/orso.py b/src/ess/reflectometry/orso.py index e4bdcc2d..8185a6f2 100644 --- a/src/ess/reflectometry/orso.py +++ 
b/src/ess/reflectometry/orso.py @@ -6,11 +6,10 @@ of reference runs and only use the metadata of the sample run. """ -import graphlib import os import platform from datetime import datetime, timezone -from typing import Any, NewType +from typing import NewType from dateutil.parser import parse as parse_datetime from orsopy.fileio import base as orso_base @@ -23,12 +22,6 @@ SampleRun, ) -try: - from sciline.task_graph import TaskGraph -except ModuleNotFoundError: - TaskGraph = Any - - OrsoCreator = NewType("OrsoCreator", orso_base.Person) """ORSO creator, that is, the person who processed the data.""" @@ -178,46 +171,6 @@ def build_orso_data_source( ) -_CORRECTIONS_BY_GRAPH_KEY = { - # ReducibleDetectorData[SampleRun]: "chopper ToF correction", - # FootprintCorrectedData[SampleRun]: "footprint correction", - # SupermirrorReflectivityCorrection: "supermirror calibration", -} - - -def find_corrections(task_graph: TaskGraph) -> list[str]: - """Determine the list of corrections for ORSO from a task graph. - - Checks for known keys in the graph that correspond to corrections - that should be tracked in an ORSO output dataset. - Bear in mind that this exclusively checks the types used as keys in a task graph, - it cannot detect other corrections that are performed within providers - or outside the graph. - - Parameters - ---------- - : - task_graph: - The task graph used to produce output data. - - Returns - ------- - : - List of corrections in the order they are applied in. - """ - toposort = graphlib.TopologicalSorter( - { - key: tuple(provider.arg_spec.keys()) - for key, provider in task_graph._graph.items() - } - ) - return [ - c - for key in toposort.static_order() - if (c := _CORRECTIONS_BY_GRAPH_KEY.get(key, None)) is not None - ] - - providers = ( build_orso_data_source, build_orso_measurement, diff --git a/src/ess/reflectometry/tools.py b/src/ess/reflectometry/tools.py index 99cf7e52..23073b8c 100644 --- a/src/ess/reflectometry/tools.py +++ b/src/ess/reflectometry/tools.py @@ -333,8 +333,5 @@ def orso_datasets_from_measurements( wf[name] = value wf[ReflectivityOverQ] = scale_factor * curve dataset = wf.compute(orso.OrsoIofQDataset) - dataset.info.reduction.corrections = orso.find_corrections( - wf.get(orso.OrsoIofQDataset) - ) datasets.append(dataset) return datasets From a613608f870d3d4d6fa0bbd86bc2c61ef092430d Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Mon, 2 Dec 2024 16:41:10 +0100 Subject: [PATCH 11/37] fix tests --- src/ess/amor/load.py | 6 ++--- src/ess/reflectometry/workflow.py | 13 +++-------- tests/amor/pipeline_test.py | 37 +++++++++++-------------------- tests/corrections_test.py | 19 +++++----------- 4 files changed, 24 insertions(+), 51 deletions(-) diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index 0e8feb8c..fc134b88 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -2,8 +2,6 @@ # Copyright (c) 2023 Scipp contributors (https://github.com/scipp) import scipp as sc -from ess.reduce import nexus - from ..reflectometry.load import load_nx from ..reflectometry.types import ( DetectorRotation, @@ -28,7 +26,7 @@ def load_detector( file_path: Filename[RunType], detector_name: NeXusDetectorName[RunType] ) -> LoadedNeXusDetector[RunType]: - return nexus.load_detector(file_path=file_path, detector_name=detector_name) + return next(load_nx(file_path, f"NXentry/NXinstrument/{detector_name}")) def load_events( @@ -43,7 +41,7 @@ def load_events( ) -> RawDetectorData[RunType]: detector_numbers = pixel_coordinates_in_detector_system() data = ( - 
nexus.extract_detector_data(detector) + detector["data"] .bins.constituents["data"] .group(detector_numbers.data.flatten(to='event_id')) .fold("event_id", sizes=detector_numbers.sizes) diff --git a/src/ess/reflectometry/workflow.py b/src/ess/reflectometry/workflow.py index 1af48cfb..be07429e 100644 --- a/src/ess/reflectometry/workflow.py +++ b/src/ess/reflectometry/workflow.py @@ -22,16 +22,9 @@ def _concatenate_event_lists(*das): - return ( - sc.reduce(das) - .bins.concat() - .assign_coords( - { - name: das[0].coords[name] - for name in ('position', 'sample_rotation', 'detector_rotation') - } - ) - ) + da = sc.reduce(das).bins.concat() + missing_coords = set(das[0].coords) - set(da.coords) + return da.assign_coords({coord: das[0].coords[coord] for coord in missing_coords}) def _any_value(x, *_): diff --git a/tests/amor/pipeline_test.py b/tests/amor/pipeline_test.py index acdd039e..c007c35f 100644 --- a/tests/amor/pipeline_test.py +++ b/tests/amor/pipeline_test.py @@ -34,12 +34,11 @@ def amor_pipeline() -> sciline.Pipeline: pl[SampleSize[ReferenceRun]] = sc.scalar(10.0, unit="mm") pl[WavelengthBins] = sc.geomspace("wavelength", 2.8, 12, 300, unit="angstrom") - pl[YIndexLimits] = sc.scalar(11, unit=None), sc.scalar(41, unit=None) - pl[ZIndexLimits] = sc.scalar(80, unit=None), sc.scalar(370, unit=None) + pl[YIndexLimits] = sc.scalar(11), sc.scalar(41) + pl[ZIndexLimits] = sc.scalar(80), sc.scalar(370) pl[QBins] = sc.geomspace( dim="Q", start=0.005, stop=0.115, num=391, unit="1/angstrom" ) - # The sample rotation value in the file is slightly off, so we set it manually pl[SampleRotation[ReferenceRun]] = sc.scalar(0.65, unit="deg") pl[Filename[ReferenceRun]] = amor.data.amor_reference_run() @@ -56,28 +55,29 @@ def amor_pipeline() -> sciline.Pipeline: @pytest.mark.filterwarnings("ignore:Failed to convert .* into a transformation") @pytest.mark.filterwarnings("ignore:Invalid transformation, missing attribute") -def test_run_data_pipeline(amor_pipeline: sciline.Pipeline): +def test_has_expected_coordinates(amor_pipeline: sciline.Pipeline): # The sample rotation value in the file is slightly off, so we set it manually amor_pipeline[SampleRotation[SampleRun]] = sc.scalar(0.85, unit="deg") amor_pipeline[Filename[SampleRun]] = amor.data.amor_sample_run(608) - res = amor_pipeline.compute(ReflectivityOverQ) - assert "Q" in res.coords - assert "Q_resolution" in res.coords + reflectivity_over_q = amor_pipeline.compute(ReflectivityOverQ) + assert "Q" in reflectivity_over_q.coords + assert "Q_resolution" in reflectivity_over_q.coords @pytest.mark.filterwarnings("ignore:Failed to convert .* into a transformation") @pytest.mark.filterwarnings("ignore:Invalid transformation, missing attribute") -def test_run_full_pipeline(amor_pipeline: sciline.Pipeline): +def test_orso_pipeline(amor_pipeline: sciline.Pipeline): + # The sample rotation value in the file is slightly off, so we set it manually amor_pipeline[SampleRotation[SampleRun]] = sc.scalar(0.85, unit="deg") - # Make the Q range cover a larger interval than the experiment is sensitive to. - # This let's us test the non-covered regions are filtered from the ORSO data. 
- amor_pipeline[QBins] = sc.geomspace( - dim="Q", start=0.005, stop=0.15, num=391, unit="1/angstrom" - ) amor_pipeline[Filename[SampleRun]] = amor.data.amor_sample_run(608) res = amor_pipeline.compute(orso.OrsoIofQDataset) assert res.info.data_source.experiment.instrument == "Amor" assert res.info.reduction.software.name == "ess.reflectometry" + assert res.info.reduction.corrections == [ + "chopper ToF correction", + "footprint correction", + "supermirror calibration", + ] assert res.data.ndim == 2 assert res.data.shape[1] == 4 assert np.all(res.data[:, 1] >= 0) @@ -127,14 +127,3 @@ def test_pipeline_merging_events_result_unchanged(amor_pipeline: sciline.Pipelin assert_allclose( 2 * sc.variances(result.data), sc.variances(result2.data), rtol=sc.scalar(1e-6) ) - - -def test_find_corrections(amor_pipeline: sciline.Pipeline): - amor_pipeline[Filename[SampleRun]] = amor.data.amor_sample_run(608) - graph = amor_pipeline.get(orso.OrsoIofQDataset) - # In topological order - assert orso.find_corrections(graph) == [ - "chopper ToF correction", - "footprint correction", - "supermirror calibration", - ] diff --git a/tests/corrections_test.py b/tests/corrections_test.py index 3e7607cb..f5b4554c 100644 --- a/tests/corrections_test.py +++ b/tests/corrections_test.py @@ -1,29 +1,22 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright (c) 2023 Scipp contributors (https://github.com/scipp) import scipp as sc -from scipp import constants as cst +from scipp.constants import pi from scipp.testing import assert_allclose from ess.reflectometry import corrections -def test_footprint_correction(): - data = sc.DataArray( - data=sc.array(dims=['row'], values=[1.0], unit='counts'), - coords={ - "theta": sc.array(dims=['row'], values=[cst.pi.value / 6], unit='rad'), - "px": sc.array(dims=['row'], values=[1], unit=None), - }, - ).group('px') - out = corrections.footprint_correction( - data, +def test_footprint_on_sample(): + footprint = corrections.footprint_on_sample( + pi / 6 * sc.scalar(1, unit='rad'), beam_size=sc.scalar(5, unit='mm'), sample_size=sc.scalar(10, unit='mm'), ) - expected = sc.scalar(1, unit='counts') / sc.erf( + expected = sc.erf( 1 / (sc.scalar(2) * sc.sqrt(sc.scalar(2.0) * sc.log(sc.scalar(2.0)))) ) assert_allclose( - out.data.values[0].data[0], + footprint, expected, ) From 50154913945ca442d9d2d1869683131837e7f4ad Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Mon, 2 Dec 2024 17:43:31 +0100 Subject: [PATCH 12/37] fix: default beam divergence limit --- src/ess/amor/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/ess/amor/__init__.py b/src/ess/amor/__init__.py index cfa95309..6de89cc8 100644 --- a/src/ess/amor/__init__.py +++ b/src/ess/amor/__init__.py @@ -63,7 +63,10 @@ def default_parameters() -> dict: NeXusDetectorName[RunType]: "detector", ChopperPhase[RunType]: sc.scalar(7.0, unit="deg"), ChopperFrequency[RunType]: sc.scalar(8.333, unit="Hz"), - BeamDivergenceLimits: (sc.scalar(-0.7, unit='deg'), sc.scalar(0.7, unit='deg')), + BeamDivergenceLimits: ( + sc.scalar(-0.75, unit='deg'), + sc.scalar(0.75, unit='deg'), + ), } From 9afa89328d3cfa23ceed448047748d95daf6639b Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Mon, 2 Dec 2024 17:44:25 +0100 Subject: [PATCH 13/37] docs: update after refactor --- docs/user-guide/amor/amor-reduction.ipynb | 43 +++-------------------- docs/user-guide/amor/compare-to-eos.ipynb | 11 +++--- 2 files changed, 9 insertions(+), 45 deletions(-) diff --git a/docs/user-guide/amor/amor-reduction.ipynb 
b/docs/user-guide/amor/amor-reduction.ipynb
index 09cabeb9..db4bc52b 100644
--- a/docs/user-guide/amor/amor-reduction.ipynb
+++ b/docs/user-guide/amor/amor-reduction.ipynb
@@ -44,23 +44,7 @@
    "## Create and configure the workflow\n",
    "\n",
    "We begin by creating the Amor workflow object which is a skeleton for reducing Amor data,\n",
-    "with pre-configured steps."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "workflow = amor.AmorWorkflow()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We then need to set the missing parameters which are specific to each experiment:"
+    "with pre-configured steps, and then set the missing parameters which are specific to each experiment:"
    ]
   },
   {
@@ -69,6 +53,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "workflow = amor.AmorWorkflow()\n",
    "workflow[SampleSize[SampleRun]] = sc.scalar(10.0, unit='mm')\n",
    "workflow[SampleSize[ReferenceRun]] = sc.scalar(10.0, unit='mm')\n",
    "\n",
@@ -76,7 +61,7 @@
    "workflow[ChopperPhase[SampleRun]] = sc.scalar(-7.5, unit='deg')\n",
    "\n",
    "workflow[QBins] = sc.geomspace(dim='Q', start=0.005, stop=0.3, num=391, unit='1/angstrom')\n",
-    "workflow[WavelengthBins] = sc.geomspace('wavelength', 2.8, 12.5, 301, unit='angstrom')\n",
+    "workflow[WavelengthBins] = sc.geomspace('wavelength', 2.8, 12.5, 2001, unit='angstrom')\n",
    "\n",
    "# The YIndexLimits and ZIndexLimits define ranges on the detector where\n",
    "# data is considered to be valid signal.\n",
@@ -181,7 +166,7 @@
    "runs = {\n",
    "    '608': {\n",
    "        # The sample rotation values in the files are slightly off, so we replace\n",
    "        # them with corrected values.\n",
    "        SampleRotation[SampleRun]: sc.scalar(0.85, unit='deg'),\n",
    "        Filename[SampleRun]: amor.data.amor_sample_run('608'),\n",
    "    },\n",
@@ -375,11 +360,11 @@
    "workflow[Filename[SampleRun]] = runs['608'][Filename[SampleRun]]\n",
    "workflow[SampleRotation[SampleRun]] = runs['608'][SampleRotation[SampleRun]]\n",
    "wavelength_z_figure(\n",
-    "    workflow.compute(ReducibleData[SampleRun]),\n",
+    "    workflow.compute(Sample),\n",
    "    wavelength_bins=workflow.compute(WavelengthBins),\n",
    "    grid=False\n",
    ") + wavelength_z_figure(\n",
-    "    reference_result,\n",
+    "    workflow.compute(Reference),\n",
    "    grid=False\n",
    ")"
   ]
@@ -466,24 +451,6 @@
    ")"
   ]
  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "To support tracking provenance, we also list the corrections that were done by the workflow and store them in the dataset:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "iofq_dataset.info.reduction.corrections = orso.find_corrections(\n",
-    "    workflow.get(orso.OrsoIofQDataset)\n",
-    ")"
-   ]
-  },
  {
   "cell_type": "markdown",
   "metadata": {},
diff --git a/docs/user-guide/amor/compare-to-eos.ipynb b/docs/user-guide/amor/compare-to-eos.ipynb
index c814ea6f..fd861f24 100644
--- a/docs/user-guide/amor/compare-to-eos.ipynb
+++ b/docs/user-guide/amor/compare-to-eos.ipynb
@@ -85,15 +85,12 @@
    "workflow[SampleSize[ReferenceRun]] = sc.scalar(10.0, unit='mm')\n",
    "workflow[ChopperPhase[SampleRun]] = sc.scalar(-7.5, unit='deg')\n",
    "\n",
-    "# In Jochens workflow:\n",
-    "# * divergence mask: [-0.7, 0.7]\n",
-    "# * note, divergence relative to beam center\n",
    "workflow[WavelengthBins] = sc.geomspace('wavelength', 2.8, 12.5, 301, unit='angstrom')\n",
    "workflow[QBins] = sc.geomspace(\n",
    "    dim='Q', start=0.00505, stop=2.93164766e-01, num=391, unit='1/angstrom'\n",
    ")\n",
-    "workflow[YIndexLimits] = sc.scalar(11, unit=None),
sc.scalar(41, unit=None)\n", - "workflow[ZIndexLimits] = sc.scalar(80, unit=None), sc.scalar(370, unit=None)\n", + "workflow[YIndexLimits] = sc.scalar(11), sc.scalar(41)\n", + "workflow[ZIndexLimits] = sc.scalar(80), sc.scalar(370)\n", "\n", "# Chopper phase value in the file is wrong, so we set it manually\n", "workflow[ChopperPhase[ReferenceRun]] = sc.scalar(-7.5, unit='deg')\n", @@ -101,9 +98,9 @@ "workflow[SampleRotation[ReferenceRun]] = sc.scalar(0.65, unit='deg')\n", "workflow[Filename[ReferenceRun]] = amor.data.amor_reference_run()\n", "\n", - "reference_result = workflow.compute(IdealReferenceIntensity)\n", + "reference_result = workflow.compute(ReducedReference)\n", "# Set the result back onto the pipeline to cache it\n", - "workflow[IdealReferenceIntensity] = reference_result" + "workflow[ReducedReference] = reference_result" ] }, { From 48d6313f3907e20308e287a76cf04b1d33c62a4d Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Tue, 3 Dec 2024 01:34:42 +0100 Subject: [PATCH 14/37] fix: add kappad --- src/ess/amor/__init__.py | 4 ++-- src/ess/amor/conversions.py | 37 +++++++++++++++++++++---------------- src/ess/amor/load.py | 11 +++++++++++ src/ess/amor/types.py | 6 ++++++ 4 files changed, 40 insertions(+), 18 deletions(-) diff --git a/src/ess/amor/__init__.py b/src/ess/amor/__init__.py index 6de89cc8..29bbad99 100644 --- a/src/ess/amor/__init__.py +++ b/src/ess/amor/__init__.py @@ -64,8 +64,8 @@ def default_parameters() -> dict: ChopperPhase[RunType]: sc.scalar(7.0, unit="deg"), ChopperFrequency[RunType]: sc.scalar(8.333, unit="Hz"), BeamDivergenceLimits: ( - sc.scalar(-0.75, unit='deg'), - sc.scalar(0.75, unit='deg'), + sc.scalar(-0.75, unit='deg').to(unit='rad'), + sc.scalar(0.75, unit='deg').to(unit='rad'), ), } diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 91432c6e..9ffaf75f 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -27,8 +27,13 @@ def theta(wavelength, divergence_angle, L2, sample_rotation, detector_rotation): return out -def angle_of_divergence(theta, sample_rotation): - return theta - sample_rotation +def angle_of_divergence(theta, sample_rotation, angle_to_center_of_beam): + return ( + theta + - sample_rotation + - angle_to_center_of_beam + - sc.scalar(0.245, unit='deg').to(unit='rad') + ) def wavelength( @@ -57,6 +62,10 @@ def wavelength( return out.to(unit='angstrom', copy=False) +def _not_between(v, a, b): + return (v < a) | (v > b) + + def add_common_coords_and_masks( da: RawDetectorData[RunType], ylim: YIndexLimits, @@ -77,21 +86,17 @@ def add_common_coords_and_masks( rename_dims=False, keep_intermediate=False, ) - da.masks["stripe_range"] = (da.coords["stripe"] < ylim[0]) | ( - da.coords["stripe"] > ylim[1] - ) - da.masks['z_range'] = (da.coords["z_index"] < zlims[0]) | ( - da.coords["z_index"] > zlims[1] - ) - da.bins.masks["divergence_too_large"] = ( - da.bins.coords["angle_of_divergence"] - < bdlim[0].to(unit=da.bins.coords["angle_of_divergence"].bins.unit) - ) | ( - da.bins.coords["angle_of_divergence"] - > bdlim[1].to(unit=da.bins.coords["angle_of_divergence"].bins.unit) + da.masks["stripe_range"] = _not_between(da.coords["stripe"], *ylim) + da.masks['z_range'] = _not_between(da.coords["z_index"], *zlims) + da.bins.masks["divergence_too_large"] = _not_between( + da.bins.coords["angle_of_divergence"], + bdlim[0].to(unit=da.bins.coords["angle_of_divergence"].bins.unit), + bdlim[1].to(unit=da.bins.coords["angle_of_divergence"].bins.unit), ) - da.bins.masks['wavelength'] = (wbins[0] > 
da.bins.coords['wavelength']) | ( - wbins[-1] < da.bins.coords['wavelength'] + da.bins.masks['wavelength'] = _not_between( + da.bins.coords['wavelength'], + wbins[0], + wbins[-1], ) da /= footprint_on_sample( da.bins.coords['theta'], diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index fc134b88..34bd08ea 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -15,6 +15,7 @@ ) from .geometry import Detector, pixel_coordinates_in_detector_system from .types import ( + AngleCenterOfIncomingToHorizon, ChopperDistance, ChopperFrequency, ChopperPhase, @@ -38,6 +39,7 @@ def load_events( chopper_distance: ChopperDistance[RunType], chopper_separation: ChopperSeparation[RunType], sample_size: SampleSize[RunType], + angle_to_center_of_beam: AngleCenterOfIncomingToHorizon[RunType], ) -> RawDetectorData[RunType]: detector_numbers = pixel_coordinates_in_detector_system() data = ( @@ -66,6 +68,7 @@ def load_events( data.coords["L1"] = sc.abs(chopper_distance) data.coords["L2"] = data.coords['distance_in_detector'] + Detector.distance data.coords["sample_size"] = sample_size + data.coords["angle_to_center_of_beam"] = angle_to_center_of_beam.to(unit='rad') return RawDetectorData[RunType](data) @@ -113,6 +116,13 @@ def load_amor_detector_rotation(fp: Filename[RunType]) -> DetectorRotation[RunTy return sc.scalar(nu['value'].data['dim_1', 0]['time', 0].value, unit='deg') +def load_amor_angle_from_horizon_to_center_of_incident_beam( + fp: Filename[RunType], +) -> AngleCenterOfIncomingToHorizon[RunType]: + (kad,) = load_nx(fp, "NXentry/NXinstrument/master_parameters/kad") + return sc.scalar(kad['value'].data['dim_1', 0]['time', 0].value, unit='deg') + + providers = ( load_detector, load_events, @@ -122,5 +132,6 @@ def load_amor_detector_rotation(fp: Filename[RunType]) -> DetectorRotation[RunTy load_amor_chopper_separation, load_amor_sample_rotation, load_amor_detector_rotation, + load_amor_angle_from_horizon_to_center_of_incident_beam, amor_chopper, ) diff --git a/src/ess/amor/types.py b/src/ess/amor/types.py index e9b5481e..4157a5ee 100644 --- a/src/ess/amor/types.py +++ b/src/ess/amor/types.py @@ -35,6 +35,12 @@ class ThetaBins(sciline.Scope[RunType, sc.Variable], sc.Variable): detector pixels have the same theta value.""" +class AngleCenterOfIncomingToHorizon( + sciline.Scope[RunType, sc.DataGroup], sc.DataGroup +): + """Angle from the center of the incoming beam to the horizon.""" + + WavelengthThetaFigure = NewType("WavelengthThetaFigure", Any) WavelengthZIndexFigure = NewType("WavelengthZIndexFigure", Any) QThetaFigure = NewType("QThetaFigure", Any) From 57217ab91f28b717e03a03f4ecd373aea4a6eae7 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Fri, 6 Dec 2024 15:25:31 +0100 Subject: [PATCH 15/37] docs: add docstrings and explanations --- src/ess/amor/conversions.py | 59 +++++++++++++++++++++++++++++++++++ src/ess/amor/normalization.py | 43 ++++++++++++++++++++++++- 2 files changed, 101 insertions(+), 1 deletion(-) diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 9ffaf75f..fc8b4e0c 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -18,6 +18,50 @@ def theta(wavelength, divergence_angle, L2, sample_rotation, detector_rotation): + ''' + Angle of reflection. + + Computes the angle between the scattering direction of + the neutron and the sample surface. + + :math:`\\gamma^*` denotes the angle between the scattering direction + and the horizontal plane. 
+ :math:`\\gamma` denotes the angle between the ray from sample position + to detection position + and the horizontal plane. + :math:`L_2` is the length of the ray from sample position to detector position. + :math:`v` is the velocity of the neutron at the sample. + :math:`t` is the travel time from sample to detector. + + The parabolic trajectory of the neutron satisfies + + .. math:: + + \\sin(\\gamma) L_2 = \\sin(\\gamma^*) v t - \\frac{g}{2} t^2 + + and + + .. math:: + + \\cos(\\gamma) L_2 = \\cos(\\gamma^*) vt + + where :math:`g` is the gravitational acceleration. + + The second equation tells us that the approximation :math:`L_2=vt` + will have a small error if :math:`\\gamma` is close to 0 and + the difference between :math:`\\gamma` and :math:`\\gamma^*` is small. + + Using this approximation we can solve the first equation, + and by expressing :math:`v` in terms of the wavelength we get + + .. math:: + + \\sin(\\gamma^*) = + \\sin(\\gamma) + \\frac{g}{2} \\frac{L_2 \\lambda^2 m_n^2}{h^2}. + + Finally, the scattering angle is obtained by subtracting the sample rotation + relative to the horizontal plane. + ''' c = sc.constants.g * sc.constants.m_n**2 / sc.constants.h**2 out = (c * L2 * wavelength**2).to(unit='dimensionless') + sc.sin( divergence_angle + detector_rotation @@ -28,10 +72,20 @@ def angle_of_divergence(theta, sample_rotation, angle_to_center_of_beam): + """ + Difference between the incident angle and the center of the incident beam. + Useful for filtering parts of the beam that have too high divergence. + + On the Amor instrument this is always in the interval [-0.75 deg, 0.75 deg], + but the divergence of the incident beam can be made lower. + """ return ( theta - sample_rotation + # - angle_to_center_of_beam + # "kappa" the natural incidence angle + # is a fixed parameter of the instrument - sc.scalar(0.245, unit='deg').to(unit='rad') ) @@ -39,6 +93,7 @@ def wavelength( event_time_offset, divergence_angle, L1, L2, chopper_phase, chopper_frequency ): + "Converts event_time_offset to wavelength using the chopper settings." out = event_time_offset.to(unit="ns", dtype="float64", copy=True) unit = out.bins.unit tau = (1 / (2 * chopper_frequency)).to(unit=unit) @@ -48,6 +103,7 @@ def wavelength( frame_bound = tau - tof_offset maximum = 2 * tau - tof_offset + # Frame unwrapping out += sc.where( (minimum < event_time_offset) & (event_time_offset < frame_bound), tof_offset, @@ -57,6 +113,8 @@ def wavelength( sc.scalar(np.nan, unit=unit), ), ) + # Correction for path length through guides being different + # depending on incident angle. out -= (divergence_angle.to(unit="deg") / (180.0 * sc.units.deg)) * tau out *= (sc.constants.h / sc.constants.m_n) / (L1 + L2) return out.to(unit='angstrom', copy=False) @@ -74,6 +132,7 @@ def add_common_coords_and_masks( wbins: WavelengthBins, beam_size: BeamSize[RunType], ) -> ReducibleData[RunType]: + "Adds coords and masks that are useful for both reference and sample measurements."
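For a sense of scale, the gravity term in the derivation above is easy to evaluate numerically. A minimal check, assuming only scipp; L2 = 4 m and a 5 angstrom wavelength are illustrative values, not instrument settings:

import scipp as sc

# Size of sin(gamma*) - sin(gamma) caused by gravity, using the same
# constant expression as in the code above.
c = sc.constants.g * sc.constants.m_n**2 / sc.constants.h**2
L2 = sc.scalar(4.0, unit='m')
wavelength = sc.scalar(5.0, unit='angstrom')
correction = (c * L2 * wavelength**2).to(unit='dimensionless')
print(correction.value)  # ~6e-5, i.e. a few thousandths of a degree

The term grows quadratically with wavelength, which is why it cannot be absorbed into a constant detector-angle offset.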
da = da.transform_coords( ("wavelength", "theta", "angle_of_divergence", "Q"), { diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index 66764846..568ee8a6 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -36,13 +36,29 @@ def mask_events_where_supermirror_does_not_cover( mvalue: MValue, alpha: Alpha, ) -> Sample: + """ + Mask events in regions of the detector the reference does not cover. + + Regions of the detector that the reference + measurement doesn't cover cannot be used to compute reflectivity. + + Preferably the reference measurement should cover the entire + detector, but sometimes that is not possible, for example + if the supermirror :math:`M` value was too limited or because the reference + was measured at too high an angle. + + To figure out what events need to be masked, + compute the supermirror reflectivity as a function + of the :math:`Q` the event would have had if it had belonged to + the reference measurement. + """ R = supermirror_reflectivity( reflectometry_q( sam.bins.coords["wavelength"], theta( sam.bins.coords["wavelength"], sam.coords["pixel_divergence_angle"], - sam.coords["L2"], + ref.coords["L2"], ref.coords["sample_rotation"], ref.coords["detector_rotation"], ), @@ -62,6 +78,11 @@ def reduce_reference( mvalue: MValue, alpha: Alpha, ) -> ReducedReference: + """ + Reduces the reference measurement to estimate the + intensity distribution in the detector for + an ideal sample with reflectivity :math:`R = 1`. + """ R = supermirror_reflectivity( reference.bins.coords['Q'], c=critical_edge, @@ -78,6 +99,13 @@ def reduce_sample_over_q( reference: Reference, qbins: QBins, ) -> ReflectivityOverQ: + """ + Computes reflectivity as ratio of + sample intensity and intensity from a sample + with ideal reflectivity. + + Returns reflectivity as a function of :math:`Q`. + """ h = reference.flatten(to='Q').hist(Q=qbins) R = sample.bins.concat().bin(Q=qbins) / h.data R.coords['Q_resolution'] = sc.sqrt( @@ -96,6 +124,13 @@ def reduce_sample_over_zw( reference: Reference, wbins: WavelengthBins, ) -> ReflectivityOverZW: + """ + Computes reflectivity as ratio of + sample intensity and intensity from a sample + with ideal reflectivity. + + Returns reflectivity as a function of ``blade``, ``wire`` and :math:`\\lambda`. + """ R = sample.bins.concat(('stripe',)).bin(wavelength=wbins) / reference.data R.masks["too_few_events"] = reference.data < sc.scalar(1, unit="counts") return R @@ -107,6 +142,12 @@ def evaluate_reference( qbins: QBins, detector_spatial_resolution: DetectorSpatialResolution[SampleRun], ) -> Reference: + """ + Adds a :math:`Q` and :math:`Q`-resolution coordinate to each bin of the ideal + intensity distribution. The coordinates are computed as if the data came from + the sample measurement, that is, they use the ``sample_rotation`` + and ``detector_rotation`` parameters from the sample measurement.
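The re-evaluation described here boils down to overwriting the geometry coordinates and recomputing derived ones. A toy illustration with hypothetical coordinate names, assuming only scipp:

import scipp as sc

# Toy data with a derived angle that depends on the geometry.
da = sc.DataArray(
    sc.ones(dims=['pixel'], shape=[3]),
    coords={
        'nu': sc.array(dims=['pixel'], values=[0.1, 0.2, 0.3], unit='rad'),
        'sample_rotation': sc.scalar(0.65, unit='deg'),
    },
)
# Pretend the data was measured at a different sample rotation,
# then recompute the derived coordinate from the new geometry:
da.coords['sample_rotation'] = sc.scalar(0.85, unit='deg')
out = da.transform_coords(
    'theta',
    graph={'theta': lambda nu, sample_rotation: nu - sample_rotation.to(unit='rad')},
)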
+ """ ref = reference.copy() ref.coords["sample_rotation"] = sample.coords["sample_rotation"] ref.coords["detector_rotation"] = sample.coords["detector_rotation"] From acfcc05cc907a77457b3156426b504d25f2f4c5d Mon Sep 17 00:00:00 2001 From: jokasimr Date: Thu, 12 Dec 2024 16:26:12 +0100 Subject: [PATCH 16/37] Update src/ess/amor/load.py Co-authored-by: Jan-Lukas Wynen --- src/ess/amor/load.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index 34bd08ea..acdb37b6 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -48,8 +48,7 @@ def load_events( .group(detector_numbers.data.flatten(to='event_id')) .fold("event_id", sizes=detector_numbers.sizes) ) - for name, coord in detector_numbers.coords.items(): - data.coords[name] = coord + data.coords.update(detector_numbers.coords) data.coords['z_index'] = ( Detector.nWires * data.coords['blade'] + data.coords['wire'] From 514df9b08ea1c8de76fdbc01cf8534d07968d327 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Fri, 13 Dec 2024 14:52:38 +0100 Subject: [PATCH 17/37] fix: move natural incident constant to load function --- src/ess/amor/conversions.py | 10 +--------- src/ess/amor/geometry.py | 3 +++ src/ess/amor/load.py | 13 ++++++------- 3 files changed, 10 insertions(+), 16 deletions(-) diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index fc8b4e0c..9a225aa4 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -79,15 +79,7 @@ def angle_of_divergence(theta, sample_rotation, angle_to_center_of_beam): On the Amor instrument this is always in the interval [-0.75 deg, 0.75 deg], but the divergence of the incident beam can be made lower. """ - return ( - theta - - sample_rotation - # - - angle_to_center_of_beam - # "kappa" the natural incidence angle - # is a fixed parameter of the instrument - - sc.scalar(0.245, unit='deg').to(unit='rad') - ) + return theta - sample_rotation - angle_to_center_of_beam def wavelength( diff --git a/src/ess/amor/geometry.py b/src/ess/amor/geometry.py index 55d576c8..e9ee834e 100644 --- a/src/ess/amor/geometry.py +++ b/src/ess/amor/geometry.py @@ -66,4 +66,7 @@ def pixel_coordinates_in_detector_system() -> tuple[sc.Variable, sc.Variable]: / (Detector.distance + pixels.coords['wire'] * Detector.dX) ) ).to(unit='rad') + pixels.coords['z_index'] = ( + Detector.nWires * pixels.coords['blade'] + pixels.coords['wire'] + ) return pixels diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index acdb37b6..6701cff4 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -50,10 +50,6 @@ def load_events( ) data.coords.update(detector_numbers.coords) - data.coords['z_index'] = ( - Detector.nWires * data.coords['blade'] + data.coords['wire'] - ) - if data.bins.constituents["data"].data.variances is None: data.bins.constituents["data"].data.variances = data.bins.constituents[ "data" @@ -77,12 +73,12 @@ def amor_chopper(f: Filename[RunType]) -> RawChopper[RunType]: def load_amor_chopper_distance(ch: RawChopper[RunType]) -> ChopperDistance[RunType]: # We know the value has unit 'mm' - return sc.scalar(ch["distance"], unit="mm").to(unit="mm") + return sc.scalar(ch["distance"], unit="mm") def load_amor_chopper_separation(ch: RawChopper[RunType]) -> ChopperSeparation[RunType]: # We know the value has unit 'mm' - return sc.scalar(ch["pair_separation"], unit="mm").to(unit="mm") + return sc.scalar(ch["pair_separation"], unit="mm") def load_amor_ch_phase(ch: RawChopper[RunType]) -> ChopperPhase[RunType]: @@ 
-119,7 +115,10 @@ def load_amor_angle_from_horizon_to_center_of_incident_beam( fp: Filename[RunType], ) -> AngleCenterOfIncomingToHorizon[RunType]: (kad,) = load_nx(fp, "NXentry/NXinstrument/master_parameters/kad") - return sc.scalar(kad['value'].data['dim_1', 0]['time', 0].value, unit='deg') + natural_incident_angle = sc.scalar(0.245, unit='deg') + return natural_incident_angle + sc.scalar( + kad['value'].data['dim_1', 0]['time', 0].value, unit='deg' + ) providers = ( From 375b9794a101fd7c3851505dd7c64a2a86f0a371 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Mon, 16 Dec 2024 11:15:40 +0100 Subject: [PATCH 18/37] fix: split coords, masking and correction into separate functions --- src/ess/amor/__init__.py | 12 +++++++++++- src/ess/amor/conversions.py | 36 +++++++++++++++--------------------- src/ess/amor/load.py | 3 +++ src/ess/amor/workflow.py | 27 +++++++++++++++++++++++++++ 4 files changed, 56 insertions(+), 22 deletions(-) create mode 100644 src/ess/amor/workflow.py diff --git a/src/ess/amor/__init__.py b/src/ess/amor/__init__.py index 29bbad99..c7d31942 100644 --- a/src/ess/amor/__init__.py +++ b/src/ess/amor/__init__.py @@ -16,7 +16,16 @@ SamplePosition, BeamDivergenceLimits, ) -from . import conversions, load, orso, resolution, utils, figures, normalization +from . import ( + conversions, + load, + orso, + resolution, + utils, + figures, + normalization, + workflow, +) from .instrument_view import instrument_view from .types import ( AngularResolution, @@ -44,6 +53,7 @@ *utils.providers, *figures.providers, *orso.providers, + *workflow.providers, ) """ List of providers for setting up a Sciline pipeline. diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 9a225aa4..14636394 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -4,13 +4,8 @@ import scipp as sc from ..reflectometry.conversions import reflectometry_q -from ..reflectometry.corrections import footprint_on_sample from ..reflectometry.types import ( BeamDivergenceLimits, - BeamSize, - RawDetectorData, - ReducibleData, - RunType, WavelengthBins, YIndexLimits, ZIndexLimits, @@ -116,16 +111,11 @@ def _not_between(v, a, b): return (v < a) | (v > b) -def add_common_coords_and_masks( - da: RawDetectorData[RunType], - ylim: YIndexLimits, - zlims: ZIndexLimits, - bdlim: BeamDivergenceLimits, - wbins: WavelengthBins, - beam_size: BeamSize[RunType], -) -> ReducibleData[RunType]: - "Adds coords and masks that are useful for both reference and sample measurements." 
- da = da.transform_coords( +def add_coords( + da: sc.DataArray, +) -> sc.DataArray: + "Adds coords to the raw detector data" + return da.transform_coords( ("wavelength", "theta", "angle_of_divergence", "Q"), { "divergence_angle": "pixel_divergence_angle", @@ -137,6 +127,15 @@ def add_common_coords_and_masks( rename_dims=False, keep_intermediate=False, ) + + +def add_masks( + da: sc.DataArray, + ylim: YIndexLimits, + zlims: ZIndexLimits, + bdlim: BeamDivergenceLimits, + wbins: WavelengthBins, +): da.masks["stripe_range"] = _not_between(da.coords["stripe"], *ylim) da.masks['z_range'] = _not_between(da.coords["z_index"], *zlims) da.bins.masks["divergence_too_large"] = _not_between( @@ -149,12 +148,7 @@ def add_common_coords_and_masks( wbins[0], wbins[-1], ) - da /= footprint_on_sample( - da.bins.coords['theta'], - beam_size, - da.coords['sample_size'], - ) return da -providers = (add_common_coords_and_masks,) +providers = () diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index 6701cff4..74b0a37f 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -4,6 +4,7 @@ from ..reflectometry.load import load_nx from ..reflectometry.types import ( + BeamSize, DetectorRotation, Filename, LoadedNeXusDetector, @@ -39,6 +40,7 @@ def load_events( chopper_distance: ChopperDistance[RunType], chopper_separation: ChopperSeparation[RunType], sample_size: SampleSize[RunType], + beam_size: BeamSize[RunType], angle_to_center_of_beam: AngleCenterOfIncomingToHorizon[RunType], ) -> RawDetectorData[RunType]: detector_numbers = pixel_coordinates_in_detector_system() @@ -63,6 +65,7 @@ def load_events( data.coords["L1"] = sc.abs(chopper_distance) data.coords["L2"] = data.coords['distance_in_detector'] + Detector.distance data.coords["sample_size"] = sample_size + data.coords["beam_size"] = beam_size data.coords["angle_to_center_of_beam"] = angle_to_center_of_beam.to(unit='rad') return RawDetectorData[RunType](data) diff --git a/src/ess/amor/workflow.py b/src/ess/amor/workflow.py new file mode 100644 index 00000000..86b8abb2 --- /dev/null +++ b/src/ess/amor/workflow.py @@ -0,0 +1,27 @@ +from ..reflectometry.types import ( + BeamDivergenceLimits, + RawDetectorData, + ReducibleData, + RunType, + WavelengthBins, + YIndexLimits, + ZIndexLimits, +) +from .conversions import add_coords, add_masks +from .corrections import correct_by_footprint + + +def add_coords_masks_and_apply_corrections( + da: RawDetectorData[RunType], + ylim: YIndexLimits, + zlims: ZIndexLimits, + bdlim: BeamDivergenceLimits, + wbins: WavelengthBins, +) -> ReducibleData[RunType]: + da = add_coords(da) + da = add_masks(da, ylim, zlims, bdlim, wbins) + correct_by_footprint(da) + return da + + +providers = (add_coords_masks_and_apply_corrections,) From 459b4594de6a4bfdce658f02a752896cb17de05a Mon Sep 17 00:00:00 2001 From: jokasimr Date: Fri, 13 Dec 2024 10:38:05 +0100 Subject: [PATCH 19/37] Update src/ess/amor/conversions.py Co-authored-by: Jan-Lukas Wynen --- src/ess/amor/conversions.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 14636394..ac2354fc 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -126,6 +126,7 @@ def add_coords( }, rename_dims=False, keep_intermediate=False, + keep_aliases=False, ) From d8319fa8f6ce9ce8a852418dd26e05602de324d1 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Mon, 16 Dec 2024 11:20:50 +0100 Subject: [PATCH 20/37] fix --- src/ess/amor/corrections.py | 11 +++++++++++ 1 file changed, 11 insertions(+) create 
mode 100644 src/ess/amor/corrections.py diff --git a/src/ess/amor/corrections.py b/src/ess/amor/corrections.py new file mode 100644 index 00000000..8d5103cd --- /dev/null +++ b/src/ess/amor/corrections.py @@ -0,0 +1,11 @@ +import scipp as sc + +from ..reflectometry.corrections import footprint_on_sample + + +def correct_by_footprint(da: sc.DataArray) -> None: + da /= footprint_on_sample( + da.bins.coords['theta'], + da.coords['beam_size'], + da.coords['sample_size'], + ) From 28ac9f7f8b6af624c1cc7c568a320b8ef6deff1e Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Mon, 16 Dec 2024 11:34:39 +0100 Subject: [PATCH 21/37] add comment to explain why first value is used --- src/ess/amor/load.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index 74b0a37f..09d0ec98 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -119,6 +119,9 @@ def load_amor_angle_from_horizon_to_center_of_incident_beam( ) -> AngleCenterOfIncomingToHorizon[RunType]: (kad,) = load_nx(fp, "NXentry/NXinstrument/master_parameters/kad") natural_incident_angle = sc.scalar(0.245, unit='deg') + # This value should not change during the run. + # If it does we assume the change was to small to be relevant. + # Therefore only the first value is read from the log. return natural_incident_angle + sc.scalar( kad['value'].data['dim_1', 0]['time', 0].value, unit='deg' ) From 1d81812f7f1239605f05446f92ccb46daa20aefd Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Mon, 16 Dec 2024 13:44:24 +0100 Subject: [PATCH 22/37] fix: move correction to common --- src/ess/amor/conversions.py | 7 ++++++- src/ess/amor/corrections.py | 11 ----------- src/ess/amor/workflow.py | 6 +++++- src/ess/reflectometry/corrections.py | 9 +++++++++ 4 files changed, 20 insertions(+), 13 deletions(-) delete mode 100644 src/ess/amor/corrections.py diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index ac2354fc..97d6666b 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -114,7 +114,7 @@ def _not_between(v, a, b): def add_coords( da: sc.DataArray, ) -> sc.DataArray: - "Adds coords to the raw detector data" + "Adds scattering coordinates to the raw detector data." return da.transform_coords( ("wavelength", "theta", "angle_of_divergence", "Q"), { @@ -137,6 +137,11 @@ def add_masks( bdlim: BeamDivergenceLimits, wbins: WavelengthBins, ): + """ + Masks the data by ranges in the detector + coordinates ``z`` and ``y``, and by the divergence of the beam, + and by wavelength. 
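All the masks added here follow the same pattern: flag everything outside an accepted interval, at either the pixel or the event level. A self-contained toy version for event (binned) data, with made-up numbers:

import scipp as sc

def _not_between(v, a, b):
    # True outside the interval [a, b]; usable directly as a mask.
    return (v < a) | (v > b)

# Five events in a single pixel, each with a wavelength coordinate.
events = sc.DataArray(
    sc.ones(dims=['event'], shape=[5], unit='counts'),
    coords={'wavelength': sc.array(dims=['event'], values=[1.0, 3.0, 5.0, 13.0, 20.0], unit='angstrom')},
)
da = sc.DataArray(
    sc.bins(
        data=events,
        dim='event',
        begin=sc.array(dims=['pixel'], values=[0], unit=None),
        end=sc.array(dims=['pixel'], values=[5], unit=None),
    )
)
# Events outside the wavelength window are masked rather than dropped:
da.bins.masks['wavelength'] = _not_between(
    da.bins.coords['wavelength'],
    sc.scalar(2.8, unit='angstrom'),
    sc.scalar(12.5, unit='angstrom'),
)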
+ """ da.masks["stripe_range"] = _not_between(da.coords["stripe"], *ylim) da.masks['z_range'] = _not_between(da.coords["z_index"], *zlims) da.bins.masks["divergence_too_large"] = _not_between( diff --git a/src/ess/amor/corrections.py b/src/ess/amor/corrections.py deleted file mode 100644 index 8d5103cd..00000000 --- a/src/ess/amor/corrections.py +++ /dev/null @@ -1,11 +0,0 @@ -import scipp as sc - -from ..reflectometry.corrections import footprint_on_sample - - -def correct_by_footprint(da: sc.DataArray) -> None: - da /= footprint_on_sample( - da.bins.coords['theta'], - da.coords['beam_size'], - da.coords['sample_size'], - ) diff --git a/src/ess/amor/workflow.py b/src/ess/amor/workflow.py index 86b8abb2..e4975207 100644 --- a/src/ess/amor/workflow.py +++ b/src/ess/amor/workflow.py @@ -1,3 +1,4 @@ +from ..reflectometry.corrections import correct_by_footprint from ..reflectometry.types import ( BeamDivergenceLimits, RawDetectorData, @@ -8,7 +9,6 @@ ZIndexLimits, ) from .conversions import add_coords, add_masks -from .corrections import correct_by_footprint def add_coords_masks_and_apply_corrections( @@ -18,6 +18,10 @@ def add_coords_masks_and_apply_corrections( bdlim: BeamDivergenceLimits, wbins: WavelengthBins, ) -> ReducibleData[RunType]: + """ + Computes coordinates, masks and corrections that are + the same for the sample measurement and the reference measurement. + """ da = add_coords(da) da = add_masks(da, ylim, zlims, bdlim, wbins) correct_by_footprint(da) diff --git a/src/ess/reflectometry/corrections.py b/src/ess/reflectometry/corrections.py index baf91de9..0cd8716a 100644 --- a/src/ess/reflectometry/corrections.py +++ b/src/ess/reflectometry/corrections.py @@ -28,3 +28,12 @@ def footprint_on_sample( """ size_of_beam_on_sample = beam_size / sc.sin(theta) return sc.erf(fwhm_to_std(sample_size / size_of_beam_on_sample)) + + +def correct_by_footprint(da: sc.DataArray) -> None: + "Corrects the data by the size of the footprint on the sample." + da /= footprint_on_sample( + da.bins.coords['theta'], + da.coords['beam_size'], + da.coords['sample_size'], + ) From afd5ce9322d2e03a809fd23d453a034b4ea7e5c3 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Mon, 16 Dec 2024 14:12:54 +0100 Subject: [PATCH 23/37] refactor: extract common parts to ess/reflectometry --- src/ess/amor/normalization.py | 72 -------------------- src/ess/reflectometry/__init__.py | 3 +- src/ess/reflectometry/normalization.py | 91 ++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 73 deletions(-) create mode 100644 src/ess/reflectometry/normalization.py diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index 568ee8a6..ce2ea12c 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -13,12 +13,8 @@ ReducedReference, ReducibleData, Reference, - ReferenceRun, - ReflectivityOverQ, - ReflectivityOverZW, Sample, SampleRun, - WavelengthBins, ) from .conversions import theta from .resolution import ( @@ -71,71 +67,6 @@ def mask_events_where_supermirror_does_not_cover( return sam -def reduce_reference( - reference: ReducibleData[ReferenceRun], - wavelength_bins: WavelengthBins, - critical_edge: CriticalEdge, - mvalue: MValue, - alpha: Alpha, -) -> ReducedReference: - """ - Reduces the reference measurement to estimate the - intensity distribution in the detector for - an ideal sample with reflectivity :math:`R = 1`. 
- """ - R = supermirror_reflectivity( - reference.bins.coords['Q'], - c=critical_edge, - m=mvalue, - alpha=alpha, - ) - reference.bins.masks['invalid'] = sc.isnan(R) - reference /= R - return reference.bins.concat(('stripe',)).hist(wavelength=wavelength_bins) - - -def reduce_sample_over_q( - sample: Sample, - reference: Reference, - qbins: QBins, -) -> ReflectivityOverQ: - """ - Computes reflectivity as ratio of - sample intensity and intensity from a sample - with ideal reflectivity. - - Returns reflectivity as a function of :math:`Q`. - """ - h = reference.flatten(to='Q').hist(Q=qbins) - R = sample.bins.concat().bin(Q=qbins) / h.data - R.coords['Q_resolution'] = sc.sqrt( - ( - (reference * reference.coords['Q_resolution'] ** 2) - .flatten(to='Q') - .hist(Q=qbins) - ) - / h - ).data - return R - - -def reduce_sample_over_zw( - sample: Sample, - reference: Reference, - wbins: WavelengthBins, -) -> ReflectivityOverZW: - """ - Computes reflectivity as ratio of - sample intensity and intensity from a sample - with ideal reflectivity. - - Returns reflectivity as a function of ``blade``, ``wire`` and :math:`\\wavelength`. - """ - R = sample.bins.concat(('stripe',)).bin(wavelength=wbins) / reference.data - R.masks["too_few_events"] = reference.data < sc.scalar(1, unit="counts") - return R - - def evaluate_reference( reference: ReducedReference, sample: ReducibleData[SampleRun], @@ -177,9 +108,6 @@ def evaluate_reference( providers = ( - reduce_reference, - reduce_sample_over_q, - reduce_sample_over_zw, evaluate_reference, mask_events_where_supermirror_does_not_cover, ) diff --git a/src/ess/reflectometry/__init__.py b/src/ess/reflectometry/__init__.py index 6578fc76..60401bb7 100644 --- a/src/ess/reflectometry/__init__.py +++ b/src/ess/reflectometry/__init__.py @@ -10,12 +10,13 @@ __version__ = "0.0.0" -from . import conversions, orso +from . import conversions, orso, normalization from .load import load_reference, save_reference providers = ( *conversions.providers, *orso.providers, + *normalization.providers, ) """ List of providers for setting up a Sciline pipeline. diff --git a/src/ess/reflectometry/normalization.py b/src/ess/reflectometry/normalization.py new file mode 100644 index 00000000..7f17dbc1 --- /dev/null +++ b/src/ess/reflectometry/normalization.py @@ -0,0 +1,91 @@ +import scipp as sc + +from .supermirror import ( + Alpha, + CriticalEdge, + MValue, + supermirror_reflectivity, +) +from .types import ( + QBins, + ReducedReference, + ReducibleData, + Reference, + ReferenceRun, + ReflectivityOverQ, + ReflectivityOverZW, + Sample, + WavelengthBins, +) + + +def reduce_reference( + reference: ReducibleData[ReferenceRun], + wavelength_bins: WavelengthBins, + critical_edge: CriticalEdge, + mvalue: MValue, + alpha: Alpha, +) -> ReducedReference: + """ + Reduces the reference measurement to estimate the + intensity distribution in the detector for + an ideal sample with reflectivity :math:`R = 1`. + """ + R = supermirror_reflectivity( + reference.bins.coords['Q'], + c=critical_edge, + m=mvalue, + alpha=alpha, + ) + reference.bins.masks['invalid'] = sc.isnan(R) + reference /= R + return reference.bins.concat(('stripe',)).hist(wavelength=wavelength_bins) + + +def reduce_sample_over_q( + sample: Sample, + reference: Reference, + qbins: QBins, +) -> ReflectivityOverQ: + """ + Computes reflectivity as ratio of + sample intensity and intensity from a sample + with ideal reflectivity. + + Returns reflectivity as a function of :math:`Q`. 
+ """ + h = reference.flatten(to='Q').hist(Q=qbins) + R = sample.bins.concat().bin(Q=qbins) / h.data + R.coords['Q_resolution'] = sc.sqrt( + ( + (reference * reference.coords['Q_resolution'] ** 2) + .flatten(to='Q') + .hist(Q=qbins) + ) + / h + ).data + return R + + +def reduce_sample_over_zw( + sample: Sample, + reference: Reference, + wbins: WavelengthBins, +) -> ReflectivityOverZW: + """ + Computes reflectivity as ratio of + sample intensity and intensity from a sample + with ideal reflectivity. + + Returns reflectivity as a function of ``blade``, ``wire`` and :math:`\\wavelength`. + """ + R = sample.bins.concat(('stripe',)).bin(wavelength=wbins) / reference.data + R.masks["too_few_events"] = reference.data < sc.scalar(1, unit="counts") + return R + + +providers = ( + reduce_reference, + reduce_sample_over_q, + reduce_sample_over_zw, +) From c0bef22ef1b89a785dc1eb775c3929804bc94733 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 19 Dec 2024 19:42:08 +0100 Subject: [PATCH 24/37] fix: move L1,L2 definitions to add_coords provider --- src/ess/amor/conversions.py | 5 ++++- src/ess/amor/load.py | 7 +++---- src/ess/amor/resolution.py | 2 +- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 97d6666b..7f38f5b3 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -10,6 +10,7 @@ YIndexLimits, ZIndexLimits, ) +from .geometry import Detector def theta(wavelength, divergence_angle, L2, sample_rotation, detector_rotation): @@ -116,13 +117,15 @@ def add_coords( ) -> sc.DataArray: "Adds scattering coordinates to the raw detector data." return da.transform_coords( - ("wavelength", "theta", "angle_of_divergence", "Q"), + ("wavelength", "theta", "angle_of_divergence", "Q", "L1", "L2"), { "divergence_angle": "pixel_divergence_angle", "wavelength": wavelength, "theta": theta, "angle_of_divergence": angle_of_divergence, "Q": reflectometry_q, + "L1": lambda chopper_distance: sc.abs(chopper_distance), + "L2": lambda distance_in_detector: distance_in_detector + Detector.distance, }, rename_dims=False, keep_intermediate=False, diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index 09d0ec98..db7295c5 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -14,7 +14,7 @@ SampleRotation, SampleSize, ) -from .geometry import Detector, pixel_coordinates_in_detector_system +from .geometry import pixel_coordinates_in_detector_system from .types import ( AngleCenterOfIncomingToHorizon, ChopperDistance, @@ -61,9 +61,8 @@ def load_events( data.coords["detector_rotation"] = detector_rotation.to(unit='rad') data.coords["chopper_phase"] = chopper_phase data.coords["chopper_frequency"] = chopper_frequency - data.coords["chopper_separation"] = sc.abs(chopper_separation) - data.coords["L1"] = sc.abs(chopper_distance) - data.coords["L2"] = data.coords['distance_in_detector'] + Detector.distance + data.coords["chopper_separation"] = chopper_separation + data.coords["chopper_distance"] = chopper_distance data.coords["sample_size"] = sample_size data.coords["beam_size"] = beam_size data.coords["angle_to_center_of_beam"] = angle_to_center_of_beam.to(unit='rad') diff --git a/src/ess/amor/resolution.py b/src/ess/amor/resolution.py index c2fbf546..143aa0fc 100644 --- a/src/ess/amor/resolution.py +++ b/src/ess/amor/resolution.py @@ -29,7 +29,7 @@ def wavelength_resolution( : The wavelength resolution variable, as standard deviation. 
""" - return fwhm_to_std(chopper_separation / (L1 + L2)) + return fwhm_to_std(sc.abs(chopper_separation) / (L1 + L2)) def sample_size_resolution( From c0ce521007c1584992d1a088719e28e066863d38 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 19 Dec 2024 19:45:33 +0100 Subject: [PATCH 25/37] fix: no practical difference but easier to understand --- src/ess/amor/normalization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index ce2ea12c..02aaf541 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -54,7 +54,7 @@ def mask_events_where_supermirror_does_not_cover( theta( sam.bins.coords["wavelength"], sam.coords["pixel_divergence_angle"], - ref.coords["L2"], + sam.coords["L2"], ref.coords["sample_rotation"], ref.coords["detector_rotation"], ), From 6eed68bfc4aa6c5a9e91f45d91c55fc9aad030c0 Mon Sep 17 00:00:00 2001 From: jokasimr Date: Thu, 19 Dec 2024 19:48:35 +0100 Subject: [PATCH 26/37] Update src/ess/amor/normalization.py Co-authored-by: Jan-Lukas Wynen --- src/ess/amor/normalization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index 02aaf541..60d8103d 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -67,7 +67,7 @@ def mask_events_where_supermirror_does_not_cover( return sam -def evaluate_reference( +def evaluate_reference_at_sample_coords( reference: ReducedReference, sample: ReducibleData[SampleRun], qbins: QBins, From 59b5a0a9215b72079896cf3fe1fa5232de0fc5c3 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 19 Dec 2024 19:54:00 +0100 Subject: [PATCH 27/37] fix: name --- src/ess/amor/normalization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index 60d8103d..4177c65b 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -108,6 +108,6 @@ def evaluate_reference_at_sample_coords( providers = ( - evaluate_reference, + evaluate_reference_at_sample_coords, mask_events_where_supermirror_does_not_cover, ) From efe84344adce2b9dd3f7f0595e61eb4f5db1b673 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 19 Dec 2024 19:54:37 +0100 Subject: [PATCH 28/37] fix: remove typos and commented lines --- docs/user-guide/amor/amor-reduction.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/user-guide/amor/amor-reduction.ipynb b/docs/user-guide/amor/amor-reduction.ipynb index db4bc52b..22843087 100644 --- a/docs/user-guide/amor/amor-reduction.ipynb +++ b/docs/user-guide/amor/amor-reduction.ipynb @@ -166,7 +166,7 @@ "runs = {\n", " '608': {\n", " # The sample rotation values in the files are slightly off, so we replace\n", - " # them with corrected values._coord_coord\n", + " # them with corrected values.\n", " SampleRotation[SampleRun]: sc.scalar(0.85, unit='deg'),\n", " Filename[SampleRun]: amor.data.amor_sample_run('608'),\n", " },\n", @@ -272,8 +272,8 @@ " diagnostics[run_number] = workflow.compute((ReflectivityOverZW, ThetaBins[SampleRun]))\n", "\n", "# Scale the results using the scale factors computed earlier\n", - "#for run_number, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n", - "# diagnostics[run_number][ReflectivityOverZW] *= scale_factor" + "for run_number, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n", + " diagnostics[run_number][ReflectivityOverZW] *= 
scale_factor" ] }, { From c65c39ec83023571e121782712005df009afa499 Mon Sep 17 00:00:00 2001 From: jokasimr Date: Fri, 20 Dec 2024 11:29:39 +0100 Subject: [PATCH 29/37] Update src/ess/amor/load.py Co-authored-by: Jan-Lukas Wynen --- src/ess/amor/load.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index db7295c5..9737e78b 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -119,7 +119,7 @@ def load_amor_angle_from_horizon_to_center_of_incident_beam( (kad,) = load_nx(fp, "NXentry/NXinstrument/master_parameters/kad") natural_incident_angle = sc.scalar(0.245, unit='deg') # This value should not change during the run. - # If it does we assume the change was to small to be relevant. + # If it does we assume the change was too small to be relevant. # Therefore only the first value is read from the log. return natural_incident_angle + sc.scalar( kad['value'].data['dim_1', 0]['time', 0].value, unit='deg' From 9b28032f08729381087abf7108836174ed612952 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 16 Jan 2025 10:11:39 +0100 Subject: [PATCH 30/37] fix: remove unit conversion --- src/ess/amor/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ess/amor/__init__.py b/src/ess/amor/__init__.py index c7d31942..1ef6d1e5 100644 --- a/src/ess/amor/__init__.py +++ b/src/ess/amor/__init__.py @@ -74,8 +74,8 @@ def default_parameters() -> dict: ChopperPhase[RunType]: sc.scalar(7.0, unit="deg"), ChopperFrequency[RunType]: sc.scalar(8.333, unit="Hz"), BeamDivergenceLimits: ( - sc.scalar(-0.75, unit='deg').to(unit='rad'), - sc.scalar(0.75, unit='deg').to(unit='rad'), + sc.scalar(-0.75, unit='deg'), + sc.scalar(0.75, unit='deg'), ), } From c43c8547a5734a8a78f761497ca90e75bc6c7b9d Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 16 Jan 2025 10:11:54 +0100 Subject: [PATCH 31/37] fix --- src/ess/reflectometry/types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ess/reflectometry/types.py b/src/ess/reflectometry/types.py index 946858ab..dd31d465 100644 --- a/src/ess/reflectometry/types.py +++ b/src/ess/reflectometry/types.py @@ -40,7 +40,7 @@ class ReducibleData(sciline.Scope[RunType, sc.DataArray], sc.DataArray): ReducedReference = NewType("ReducedReference", sc.DataArray) """Intensity distribution on the detector for a sample with :math`R(Q) = 1`""" -Reference = NewType("NormalizationFactor", sc.DataArray) +Reference = NewType("Reference", sc.DataArray) """:code`ReducedReference` histogrammed in sample ``Q``""" Sample = NewType("Sample", sc.DataArray) From a9b25fde4c49f100488b464d65d90dee9ae8963e Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 16 Jan 2025 10:21:56 +0100 Subject: [PATCH 32/37] fix: add defensive unit conversions --- src/ess/amor/conversions.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 7f38f5b3..bac70cbd 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -60,10 +60,10 @@ def theta(wavelength, divergence_angle, L2, sample_rotation, detector_rotation): ''' c = sc.constants.g * sc.constants.m_n**2 / sc.constants.h**2 out = (c * L2 * wavelength**2).to(unit='dimensionless') + sc.sin( - divergence_angle + detector_rotation + divergence_angle.to(unit='rad') + detector_rotation.to(unit='rad') ) out = sc.asin(out, out=out) - out -= sample_rotation + out -= sample_rotation.to(unit='rad') return out @@ -75,7 
+75,11 @@ def angle_of_divergence(theta, sample_rotation, angle_to_center_of_beam): On the Amor instrument this is always in the interval [-0.75 deg, 0.75 deg], but the divergence of the incident beam can be made lower. """ - return theta - sample_rotation - angle_to_center_of_beam + return ( + theta.to(unit='rad') + - sample_rotation.to(unit='rad') + - angle_to_center_of_beam.to(unit='rad') + ) def wavelength( @@ -84,8 +88,8 @@ def wavelength( "Converts event_time_offset to wavelength using the chopper settings." out = event_time_offset.to(unit="ns", dtype="float64", copy=True) unit = out.bins.unit - tau = (1 / (2 * chopper_frequency)).to(unit=unit) - tof_offset = tau * chopper_phase / (180.0 * sc.units.deg) + tau = (1 / (2 * chopper_frequency.to(unit='Hz'))).to(unit=unit) + tof_offset = tau * chopper_phase.to(unit='rad') / (np.pi * sc.units.rad) minimum = -tof_offset frame_bound = tau - tof_offset @@ -103,13 +107,13 @@ def wavelength( ) # Correction for path length through guides being different # depending on incident angle. - out -= (divergence_angle.to(unit="deg") / (180.0 * sc.units.deg)) * tau + out -= (divergence_angle.to(unit="rad") / (np.pi * sc.units.rad)) * tau out *= (sc.constants.h / sc.constants.m_n) / (L1 + L2) return out.to(unit='angstrom', copy=False) -def _not_between(v, a, b): - return (v < a) | (v > b) +def coordinate_transformation_graph(): + return def add_coords( @@ -133,6 +137,10 @@ def add_coords( ) +def _not_between(v, a, b): + return (v < a) | (v > b) + + def add_masks( da: sc.DataArray, ylim: YIndexLimits, From a206792e5500759dca4dff247cde95c98ff2aaa8 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 16 Jan 2025 10:53:08 +0100 Subject: [PATCH 33/37] add coordinate transformation graph provider --- src/ess/amor/conversions.py | 26 ++++++++++++++------------ src/ess/amor/normalization.py | 6 +++--- src/ess/amor/types.py | 2 ++ src/ess/amor/workflow.py | 4 +++- 4 files changed, 22 insertions(+), 16 deletions(-) diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index bac70cbd..3c42cbac 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -11,6 +11,7 @@ ZIndexLimits, ) from .geometry import Detector +from .types import AmorCoordinates def theta(wavelength, divergence_angle, L2, sample_rotation, detector_rotation): @@ -112,25 +113,26 @@ def wavelength( return out.to(unit='angstrom', copy=False) -def coordinate_transformation_graph(): - return +def coordinate_transformation_graph() -> AmorCoordinates: + return { + "divergence_angle": "pixel_divergence_angle", + "wavelength": wavelength, + "theta": theta, + "angle_of_divergence": angle_of_divergence, + "Q": reflectometry_q, + "L1": lambda chopper_distance: sc.abs(chopper_distance), + "L2": lambda distance_in_detector: distance_in_detector + Detector.distance, + } def add_coords( da: sc.DataArray, + graph: dict, ) -> sc.DataArray: "Adds scattering coordinates to the raw detector data." 
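The graph returned above is a plain dict for scipp's transform_coords: keys are output coordinate names, values are callables whose parameter names are resolved against existing coordinates. A toy example with hypothetical names:

import scipp as sc

da = sc.DataArray(
    sc.ones(dims=['x'], shape=[4]),
    coords={
        'tof': sc.arange('x', 1.0, 5.0, unit='ms'),
        'L': sc.scalar(30.0, unit='m'),
    },
)
# 'speed' is computed on demand from the 'tof' and 'L' coordinates:
out = da.transform_coords('speed', graph={'speed': lambda tof, L: L / tof.to(unit='s')})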
return da.transform_coords( ("wavelength", "theta", "angle_of_divergence", "Q", "L1", "L2"), - { - "divergence_angle": "pixel_divergence_angle", - "wavelength": wavelength, - "theta": theta, - "angle_of_divergence": angle_of_divergence, - "Q": reflectometry_q, - "L1": lambda chopper_distance: sc.abs(chopper_distance), - "L2": lambda distance_in_detector: distance_in_detector + Detector.distance, - }, + graph, rename_dims=False, keep_intermediate=False, keep_aliases=False, @@ -168,4 +170,4 @@ def add_masks( return da -providers = () +providers = (coordinate_transformation_graph,) diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index 4177c65b..a3c30da9 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -23,6 +23,7 @@ sample_size_resolution, wavelength_resolution, ) +from .types import AmorCoordinates def mask_events_where_supermirror_does_not_cover( @@ -72,6 +73,7 @@ def evaluate_reference_at_sample_coords( sample: ReducibleData[SampleRun], qbins: QBins, detector_spatial_resolution: DetectorSpatialResolution[SampleRun], + graph: AmorCoordinates, ) -> Reference: """ Adds a :math:`Q` and :math:`Q`-resolution coordinate to each bin of the ideal @@ -94,9 +96,7 @@ def evaluate_reference_at_sample_coords( "Q_resolution", ), { - "divergence_angle": "pixel_divergence_angle", - "theta": theta, - "Q": reflectometry_q, + **graph, "wavelength_resolution": wavelength_resolution, "sample_size_resolution": sample_size_resolution, "angular_resolution": angular_resolution, diff --git a/src/ess/amor/types.py b/src/ess/amor/types.py index 4157a5ee..16e153d0 100644 --- a/src/ess/amor/types.py +++ b/src/ess/amor/types.py @@ -9,6 +9,8 @@ AngularResolution = NewType("AngularResolution", sc.Variable) SampleSizeResolution = NewType("SampleSizeResolution", sc.Variable) +AmorCoordinates = NewType("AmorCoordinates", dict) + class ChopperFrequency(sciline.Scope[RunType, sc.Variable], sc.Variable): """Frequency of the choppers used in the run.""" diff --git a/src/ess/amor/workflow.py b/src/ess/amor/workflow.py index e4975207..36e004b6 100644 --- a/src/ess/amor/workflow.py +++ b/src/ess/amor/workflow.py @@ -9,6 +9,7 @@ ZIndexLimits, ) from .conversions import add_coords, add_masks +from .types import AmorCoordinates def add_coords_masks_and_apply_corrections( @@ -17,12 +18,13 @@ def add_coords_masks_and_apply_corrections( zlims: ZIndexLimits, bdlim: BeamDivergenceLimits, wbins: WavelengthBins, + graph: AmorCoordinates, ) -> ReducibleData[RunType]: """ Computes coordinates, masks and corrections that are the same for the sample measurement and the reference measurement. 
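For context, providers like this one are wired together by sciline purely through their type annotations; a minimal, hypothetical pipeline demonstrating the mechanism (not the actual Amor workflow types):

from typing import NewType

import sciline
import scipp as sc

Raw = NewType('Raw', sc.DataArray)
Clean = NewType('Clean', sc.DataArray)

def load() -> Raw:
    return Raw(sc.DataArray(sc.ones(dims=['x'], shape=[2])))

def clean(da: Raw) -> Clean:
    # A provider consumes Raw and produces Clean; sciline wires it up
    # by matching the annotations.
    return Clean(da.copy())

pipeline = sciline.Pipeline([load, clean])
result = pipeline.compute(Clean)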
""" - da = add_coords(da) + da = add_coords(da, graph) da = add_masks(da, ylim, zlims, bdlim, wbins) correct_by_footprint(da) return da From f399b6a1aa48642dcd7247c103df86c0d5305edf Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 16 Jan 2025 10:53:36 +0100 Subject: [PATCH 34/37] type annotations --- src/ess/reflectometry/supermirror/__init__.py | 6 ++++-- src/ess/reflectometry/supermirror/types.py | 2 -- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/ess/reflectometry/supermirror/__init__.py b/src/ess/reflectometry/supermirror/__init__.py index ac8ad97b..6607face 100644 --- a/src/ess/reflectometry/supermirror/__init__.py +++ b/src/ess/reflectometry/supermirror/__init__.py @@ -4,10 +4,12 @@ import scipp as sc import numpy as np -from .types import Alpha, CriticalEdge, MValue, SupermirrorReflectivityCorrection +from .types import Alpha, CriticalEdge, MValue -def supermirror_reflectivity(q: sc.Variable, c: CriticalEdge, m: MValue, alpha: Alpha): +def supermirror_reflectivity( + q: sc.Variable, c: CriticalEdge, m: MValue, alpha: Alpha +) -> sc.Variable: """ Returns the reflectivity of the supermirror. For ``q`` outside of the region of known reflectivity diff --git a/src/ess/reflectometry/supermirror/types.py b/src/ess/reflectometry/supermirror/types.py index ddd4024c..b8e42239 100644 --- a/src/ess/reflectometry/supermirror/types.py +++ b/src/ess/reflectometry/supermirror/types.py @@ -9,5 +9,3 @@ '''Critical edge value of the supermirror''' Alpha = NewType('Alpha', sc.Variable) ''':math:`\\alpha` value of the supermirror''' -SupermirrorReflectivityCorrection = NewType('SupermirrorCalibrationFactor', sc.Variable) -'''Compensates the finite reflectivity of the supermirror''' From 3abbe62000e786b7171998f9c457085844d6eb4b Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 16 Jan 2025 13:30:11 +0100 Subject: [PATCH 35/37] fix: copy = false --- src/ess/amor/conversions.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 3c42cbac..9ef68b82 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -61,7 +61,7 @@ def theta(wavelength, divergence_angle, L2, sample_rotation, detector_rotation): ''' c = sc.constants.g * sc.constants.m_n**2 / sc.constants.h**2 out = (c * L2 * wavelength**2).to(unit='dimensionless') + sc.sin( - divergence_angle.to(unit='rad') + detector_rotation.to(unit='rad') + divergence_angle.to(unit='rad', copy=False) + detector_rotation.to(unit='rad') ) out = sc.asin(out, out=out) out -= sample_rotation.to(unit='rad') @@ -77,9 +77,9 @@ def angle_of_divergence(theta, sample_rotation, angle_to_center_of_beam): but the divergence of the incident beam can be made lower. 
""" return ( - theta.to(unit='rad') + theta.to(unit='rad', copy=False) - sample_rotation.to(unit='rad') - - angle_to_center_of_beam.to(unit='rad') + - angle_to_center_of_beam.to(unit='rad', copy=False) ) From b76daddacd013b890d24714626dc61dbde789dc0 Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 16 Jan 2025 13:35:05 +0100 Subject: [PATCH 36/37] refactor: rename coordinate transformation graph --- src/ess/amor/conversions.py | 4 ++-- src/ess/amor/normalization.py | 4 ++-- src/ess/amor/types.py | 2 +- src/ess/amor/workflow.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 9ef68b82..17ce7b00 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -11,7 +11,7 @@ ZIndexLimits, ) from .geometry import Detector -from .types import AmorCoordinates +from .types import AmorCoordTransformationGraph def theta(wavelength, divergence_angle, L2, sample_rotation, detector_rotation): @@ -113,7 +113,7 @@ def wavelength( return out.to(unit='angstrom', copy=False) -def coordinate_transformation_graph() -> AmorCoordinates: +def coordinate_transformation_graph() -> AmorCoordTransformationGraph: return { "divergence_angle": "pixel_divergence_angle", "wavelength": wavelength, diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index a3c30da9..eb8adf94 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -23,7 +23,7 @@ sample_size_resolution, wavelength_resolution, ) -from .types import AmorCoordinates +from .types import AmorCoordTransformationGraph def mask_events_where_supermirror_does_not_cover( @@ -73,7 +73,7 @@ def evaluate_reference_at_sample_coords( sample: ReducibleData[SampleRun], qbins: QBins, detector_spatial_resolution: DetectorSpatialResolution[SampleRun], - graph: AmorCoordinates, + graph: AmorCoordTransformationGraph, ) -> Reference: """ Adds a :math:`Q` and :math:`Q`-resolution coordinate to each bin of the ideal diff --git a/src/ess/amor/types.py b/src/ess/amor/types.py index 16e153d0..06bfdb70 100644 --- a/src/ess/amor/types.py +++ b/src/ess/amor/types.py @@ -9,7 +9,7 @@ AngularResolution = NewType("AngularResolution", sc.Variable) SampleSizeResolution = NewType("SampleSizeResolution", sc.Variable) -AmorCoordinates = NewType("AmorCoordinates", dict) +AmorCoordTransformationGraph = NewType("AmorCoordTransformationGraph", dict) class ChopperFrequency(sciline.Scope[RunType, sc.Variable], sc.Variable): diff --git a/src/ess/amor/workflow.py b/src/ess/amor/workflow.py index 36e004b6..027d0231 100644 --- a/src/ess/amor/workflow.py +++ b/src/ess/amor/workflow.py @@ -9,7 +9,7 @@ ZIndexLimits, ) from .conversions import add_coords, add_masks -from .types import AmorCoordinates +from .types import AmorCoordTransformationGraph def add_coords_masks_and_apply_corrections( @@ -18,7 +18,7 @@ def add_coords_masks_and_apply_corrections( zlims: ZIndexLimits, bdlim: BeamDivergenceLimits, wbins: WavelengthBins, - graph: AmorCoordinates, + graph: AmorCoordTransformationGraph, ) -> ReducibleData[RunType]: """ Computes coordinates, masks and corrections that are From b9eae749466c47ed01ba709366ce05f38526d0ca Mon Sep 17 00:00:00 2001 From: Johannes Kasimir Date: Thu, 16 Jan 2025 13:59:45 +0100 Subject: [PATCH 37/37] refactor: rename coord transformation domain type --- src/ess/amor/conversions.py | 4 ++-- src/ess/amor/normalization.py | 4 ++-- src/ess/amor/types.py | 2 +- src/ess/amor/workflow.py | 4 ++-- 4 files changed, 7 insertions(+), 7 
deletions(-) diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 17ce7b00..17fcc302 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -11,7 +11,7 @@ ZIndexLimits, ) from .geometry import Detector -from .types import AmorCoordTransformationGraph +from .types import CoordTransformationGraph def theta(wavelength, divergence_angle, L2, sample_rotation, detector_rotation): @@ -113,7 +113,7 @@ def wavelength( return out.to(unit='angstrom', copy=False) -def coordinate_transformation_graph() -> AmorCoordTransformationGraph: +def coordinate_transformation_graph() -> CoordTransformationGraph: return { "divergence_angle": "pixel_divergence_angle", "wavelength": wavelength, diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py index eb8adf94..dff6f951 100644 --- a/src/ess/amor/normalization.py +++ b/src/ess/amor/normalization.py @@ -23,7 +23,7 @@ sample_size_resolution, wavelength_resolution, ) -from .types import AmorCoordTransformationGraph +from .types import CoordTransformationGraph def mask_events_where_supermirror_does_not_cover( @@ -73,7 +73,7 @@ def evaluate_reference_at_sample_coords( sample: ReducibleData[SampleRun], qbins: QBins, detector_spatial_resolution: DetectorSpatialResolution[SampleRun], - graph: AmorCoordTransformationGraph, + graph: CoordTransformationGraph, ) -> Reference: """ Adds a :math:`Q` and :math:`Q`-resolution coordinate to each bin of the ideal diff --git a/src/ess/amor/types.py b/src/ess/amor/types.py index 06bfdb70..eade3cfc 100644 --- a/src/ess/amor/types.py +++ b/src/ess/amor/types.py @@ -9,7 +9,7 @@ AngularResolution = NewType("AngularResolution", sc.Variable) SampleSizeResolution = NewType("SampleSizeResolution", sc.Variable) -AmorCoordTransformationGraph = NewType("AmorCoordTransformationGraph", dict) +CoordTransformationGraph = NewType("CoordTransformationGraph", dict) class ChopperFrequency(sciline.Scope[RunType, sc.Variable], sc.Variable): diff --git a/src/ess/amor/workflow.py b/src/ess/amor/workflow.py index 027d0231..f1cbf997 100644 --- a/src/ess/amor/workflow.py +++ b/src/ess/amor/workflow.py @@ -9,7 +9,7 @@ ZIndexLimits, ) from .conversions import add_coords, add_masks -from .types import AmorCoordTransformationGraph +from .types import CoordTransformationGraph def add_coords_masks_and_apply_corrections( @@ -18,7 +18,7 @@ def add_coords_masks_and_apply_corrections( zlims: ZIndexLimits, bdlim: BeamDivergenceLimits, wbins: WavelengthBins, - graph: AmorCoordTransformationGraph, + graph: CoordTransformationGraph, ) -> ReducibleData[RunType]: """ Computes coordinates, masks and corrections that are