diff --git a/docs/user-guide/zoom.ipynb b/docs/user-guide/zoom.ipynb index a45232c..7a89e42 100644 --- a/docs/user-guide/zoom.ipynb +++ b/docs/user-guide/zoom.ipynb @@ -18,11 +18,13 @@ "outputs": [], "source": [ "%matplotlib widget\n", + "import plopp\n", "import sciline\n", "import scipp as sc\n", "from ess import polarization as pol\n", "from ess import sans\n", "from ess import isissans as isis\n", + "import ess.isissans.data # noqa: F401\n", "from ess.sans.types import *" ] }, @@ -33,33 +35,49 @@ "metadata": {}, "outputs": [], "source": [ - "sans_workflow = isis.zoom.ZoomWorkflow()\n", - "sans_workflow = sans.with_pixel_mask_filenames(sans_workflow, []) # no masks" + "from pathlib import Path\n", + "\n", + "data_folder = Path('zoom-data-2025-07')\n", + "# Runs with analyzer at 4 different times\n", + "cell_runs = [\n", + " str(data_folder / '3He_unpolarized_in' / f'ZOOM00038{run}.nxs')\n", + " for run in [423, 425, 427, 429, 431, 433, 435, 437]\n", + "]\n", + "empty_run = data_folder / 'Background_files' / 'ZOOM00038336.nxs'\n", + "depolarized_run = data_folder / 'Background_files' / 'ZOOM00038470.nxs'\n", + "cell_runs = [*cell_runs, depolarized_run]" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "id": "3", "metadata": {}, - "outputs": [], "source": [ - "from pathlib import Path\n", - "\n", - "data_folder = Path('zoom_polarized_data')\n", - "# Runs with analyzer at 4 different times\n", - "cell_runs = [str(data_folder / f'ZOOM00022{run}.nxs') for run in [710, 712, 714, 716]]\n", - "empty_run = data_folder / 'ZOOM00034787.nxs'\n", - "depolarized_run = data_folder / 'ZOOM00022718.nxs'\n", - "cell_runs = [*cell_runs, depolarized_run]" + "## Setup SANS workflow for computing transmission fraction" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "4", "metadata": {}, + "outputs": [], "source": [ - "## Setup SANS workflow for computing transmission fraction" + "params = {\n", + " 
WavelengthBins: sc.geomspace(\n", + " 'wavelength', start=1.75, stop=16.5, num=141, unit='Å'\n", + " ),\n", + " isis.mantidio.CalibrationWorkspace: None,\n", + " # Spectrum numbers used for histogram files (Workspace2D)\n", + " isis.MonitorSpectrumNumber[Incident]: isis.MonitorSpectrumNumber[Incident](3),\n", + " isis.MonitorSpectrumNumber[Transmission]: isis.MonitorSpectrumNumber[Transmission](\n", + " 4\n", + " ),\n", + " # Monitor names used for event files (EventWorkspace)\n", + " NeXusMonitorName[Incident]: NeXusMonitorName[Incident](\"monitor3\"),\n", + " NeXusMonitorName[Transmission]: NeXusMonitorName[Transmission](\"monitor4\"),\n", + " UncertaintyBroadcastMode: UncertaintyBroadcastMode.upper_bound,\n", + "}" ] }, { @@ -72,12 +90,10 @@ "from ess.polarization.zoom import ZoomTransmissionFractionWorkflow\n", "\n", "sans_workflow = ZoomTransmissionFractionWorkflow(cell_runs)\n", - "sans_workflow[Filename[EmptyBeamRun]] = str(empty_run)\n", - "sans_workflow[WavelengthBins] = sc.geomspace(\n", - " 'wavelength', start=1.75, stop=16.5, num=141, unit='Å'\n", - ")\n", + "for key, value in params.items():\n", + " sans_workflow[key] = value\n", "sans_workflow[isis.mantidio.Period] = 0\n", - "sans_workflow[isis.mantidio.CalibrationWorkspace] = None" + "sans_workflow[Filename[EmptyBeamRun]] = str(empty_run)" ] }, { @@ -158,7 +174,9 @@ "metadata": {}, "outputs": [], "source": [ - "sans_workflow.visualize(TransmissionFraction[SampleRun], graph_attr={'rankdir': 'LR'})" + "sans_workflow.visualize(\n", + " TransmissionFraction[SampleRun], graph_attr={'rankdir': 'LR'}, compact=True\n", + ")" ] }, { @@ -234,9 +252,8 @@ "metadata": {}, "outputs": [], "source": [ - "# TODO Which wavelength bounds should be used?\n", - "wav_min = 2.2 * sc.Unit('angstrom')\n", - "wav_max = 2.8 * sc.Unit('angstrom')\n", + "wav_min = 1.75 * sc.Unit('angstrom')\n", + "wav_max = 16.5 * sc.Unit('angstrom')\n", "transmission_truncated = raw_transmission['wavelength', wav_min:wav_max]\n", 
"transmission_depolarized = transmission_truncated['time', -1].copy()\n", "transmission = transmission_truncated['time', :-1].copy()" @@ -330,7 +347,13 @@ "metadata": {}, "outputs": [], "source": [ - "trans = func(wavelength=wavelength, time=time, plus_minus='plus')\n", + "trans = sc.DataArray(\n", + " func(wavelength=wavelength, time=time, plus_minus='plus'),\n", + " coords={\n", + " 'wavelength': wavelength,\n", + " 'time': time,\n", + " },\n", + ")\n", "sc.DataGroup(\n", " {f\"{time:c}\": trans['time', time] for time in trans.coords['time'][::20]}\n", ").plot(norm='linear', linestyle='solid', marker=None)" @@ -343,7 +366,13 @@ "metadata": {}, "outputs": [], "source": [ - "trans = func(wavelength=wavelength, time=time, plus_minus='plus')\n", + "trans = sc.DataArray(\n", + " func(wavelength=wavelength, time=time, plus_minus='plus'),\n", + " coords={\n", + " 'wavelength': wavelength,\n", + " 'time': time,\n", + " },\n", + ")\n", "sc.DataGroup(\n", " {f\"{wav:c}\": trans['wavelength', wav] for wav in trans.coords['wavelength'][::20]}\n", ").plot(norm='log', linestyle='solid', marker=None)" @@ -383,6 +412,154 @@ "cell_type": "markdown", "id": "30", "metadata": {}, + "source": [ + "## Sample data\n", + "\n", + "We now proceed to process the sample data.\n", + "We use the Zoom workflow from ESSsans, configured as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31", + "metadata": {}, + "outputs": [], + "source": [ + "def make_sample_workflow() -> sciline.Pipeline:\n", + " wf = isis.zoom.ZoomWorkflow()\n", + " wf.insert(isis.io.transmission_from_background_run)\n", + " wf.insert(isis.io.transmission_from_sample_run)\n", + "\n", + " # TODO Are these masks correct? 
Provide correct XML files!\n", + " masks = isis.data.zoom_tutorial_mask_filenames()\n", + " wf = sans.with_pixel_mask_filenames(wf, masks)\n", + "\n", + " for key, value in params.items():\n", + " wf[key] = value\n", + " wf[QBins] = sc.geomspace(dim='Q', start=0.004, stop=0.8, num=141, unit='1/angstrom')\n", + " wf[QxBins] = sc.linspace('Qx', start=-0.3, stop=0.3, num=101, unit='1/angstrom')\n", + " wf[QyBins] = sc.linspace('Qy', start=-0.3, stop=0.3, num=101, unit='1/angstrom')\n", + " wf[NonBackgroundWavelengthRange] = sc.array(\n", + " dims=['wavelength'], values=[0.7, 17.1], unit='angstrom'\n", + " )\n", + " wf[CorrectForGravity] = True\n", + " wf[UncertaintyBroadcastMode] = UncertaintyBroadcastMode.upper_bound\n", + " wf[ReturnEvents] = True\n", + "\n", + " # TODO Configure offsets and beam center correctly!\n", + " # sample_workflow[isis.SampleOffset] = sc.vector([0.0, 0.0, 0.11], unit='m')\n", + " # sample_workflow[isis.DetectorBankOffset] = sc.vector([0.0, 0.0, 0.5], unit='m')\n", + " wf[BeamCenter] = sc.vector([0.0, 0.0, 0.0], unit='m')\n", + "\n", + " # TODO Use direct beam!\n", + " # sample_workflow[DirectBeamFilename]\n", + " wf[DirectBeam] = None\n", + "\n", + " return wf" + ] + }, + { + "cell_type": "markdown", + "id": "32", + "metadata": {}, + "source": [ + "The sample runs are event data, so we can simply merge all runs to obtain a single event-mode $I(Q)$.\n", + "We use `sans.with_sample_runs` to obtain a workflow that merges the runs and computes the $I(Q)$:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33", + "metadata": {}, + "outputs": [], + "source": [ + "water_folder = data_folder / 'Sample_water_cell3'\n", + "sample_runs = [str(water_folder / f'ZOOM00038{run}.nxs') for run in range(439, 466, 2)]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34", + "metadata": {}, + "outputs": [], + "source": [ + "sample_workflow = make_sample_workflow()\n", + "sample_workflow[Filename[EmptyBeamRun]] = 
str(empty_run)\n", + "multi_run_sample_workflow = sans.with_sample_runs(sample_workflow, runs=sample_runs)" + ] + }, + { + "cell_type": "markdown", + "id": "35", + "metadata": {}, + "source": [ + "The sample workflow looks like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36", + "metadata": {}, + "outputs": [], + "source": [ + "multi_run_sample_workflow.visualize(\n", + " IofQ[SampleRun], graph_attr={'rankdir': 'LR'}, compact=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "37", + "metadata": {}, + "source": [ + "ESSsans does not support processing multiple periods (and there are no plans for this, as this is ISIS-specific).\n", + "We therefore run the workflow once per period.\n", + "Note that it is inefficient as it loads every file four times." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38", + "metadata": {}, + "outputs": [], + "source": [ + "# WARNING: Running this cell takes a couple of minutes when all sample runs are used.\n", + "\n", + "# TODO Is this correct? We get very large times. 
Are we using the correct transmission\n", + "# runs above?\n", + "time_base = transmission.coords['datetime'].min()\n", + "\n", + "periods = []\n", + "for period in range(4):\n", + " multi_run_sample_workflow[isis.mantidio.Period] = period\n", + " iofq = multi_run_sample_workflow.compute(IofQxy[SampleRun])\n", + " iofq = iofq.bins.assign_coords(\n", + " time=iofq.bins.coords['pulse_time'].to(unit=time_base.unit) - time_base\n", + " ).drop_coords('wavelength') # Wavelength bounds get in the way\n", + " periods.append(iofq)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39", + "metadata": {}, + "outputs": [], + "source": [ + "plopp.slicer(\n", + " sc.values(sc.concat(periods, 'period').hist(pulse_time=10)), keep=('Qx', 'Qy')\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "40", + "metadata": {}, "source": [ "## Correction workflow\n", "\n", @@ -394,7 +571,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "41", "metadata": {}, "outputs": [], "source": [ @@ -404,7 +581,7 @@ }, { "cell_type": "markdown", - "id": "32", + "id": "42", "metadata": {}, "source": [ "We will use a second-order polynomial supermirror efficiency function:" @@ -413,16 +590,17 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "43", "metadata": {}, "outputs": [], "source": [ - "# Note that these coefficients are meaningless, please fill in correct values!\n", + "from ess.polarization.data import example_polarization_efficiency_table\n", + "\n", "supermirror_workflow[pol.SupermirrorEfficiencyFunction[pol.Polarizer]] = (\n", - " pol.SecondDegreePolynomialEfficiency(\n", - " a=0.5 * sc.Unit('1/angstrom**2'),\n", - " b=0.4 * sc.Unit('1/angstrom'),\n", - " c=0.3 * sc.Unit('dimensionless'),\n", + " pol.EfficiencyLookupTable.from_file(\n", + " example_polarization_efficiency_table(),\n", + " wavelength_colname='# X ',\n", + " efficiency_colname=' Y ',\n", " )\n", ")" ] @@ -430,7 +608,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "34", + "id": "44", "metadata": {}, "outputs": [], "source": [ @@ -442,7 +620,7 @@ }, { "cell_type": "markdown", - "id": "35", + "id": "45", "metadata": {}, "source": [ "For a single channel, the complete workflow looks as follows:" @@ -451,7 +629,7 @@ { "cell_type": "code", "execution_count": null, - "id": "36", + "id": "46", "metadata": {}, "outputs": [], "source": [ @@ -459,11 +637,80 @@ " pol.PolarizationCorrectedData[pol.Up, pol.Up], graph_attr={'rankdir': 'LR'}\n", ")" ] + }, + { + "cell_type": "markdown", + "id": "47", + "metadata": {}, + "source": [ + "We can now set the reduced sample data for each measured channel:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "48", + "metadata": {}, + "outputs": [], + "source": [ + "workflow[pol.ReducedSampleDataBySpinChannel[pol.Up, pol.Up]] = periods[0]\n", + "workflow[pol.ReducedSampleDataBySpinChannel[pol.Down, pol.Up]] = periods[1]\n", + "workflow[pol.ReducedSampleDataBySpinChannel[pol.Up, pol.Down]] = periods[2]\n", + "workflow[pol.ReducedSampleDataBySpinChannel[pol.Down, pol.Down]] = periods[3]" + ] + }, + { + "cell_type": "markdown", + "id": "49", + "metadata": {}, + "source": [ + "The result for a single input channel (up-up) is:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50", + "metadata": {}, + "outputs": [], + "source": [ + "channels = workflow.compute(pol.PolarizationCorrectedData[pol.Up, pol.Up])\n", + "tiled = plopp.tiled(2, 2)\n", + "tiled[0, 0] = channels.upup.nanhist().plot()\n", + "tiled[0, 1] = channels.updown.nanhist().plot()\n", + "tiled[1, 0] = channels.downup.nanhist().plot()\n", + "tiled[1, 1] = channels.downdown.nanhist().plot()\n", + "tiled" + ] + }, + { + "cell_type": "markdown", + "id": "51", + "metadata": {}, + "source": [ + "The combination of all four channels is:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "52", + "metadata": {}, + "outputs": [], + "source": [ + "channels = 
workflow.compute(pol.TotalPolarizationCorrectedData)\n", + "tiled = plopp.tiled(2, 2)\n", + "tiled[0, 0] = channels.upup.nanhist().plot()\n", + "tiled[0, 1] = channels.updown.nanhist().plot()\n", + "tiled[1, 0] = channels.downup.nanhist().plot()\n", + "tiled[1, 1] = channels.downdown.nanhist().plot()\n", + "tiled" + ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "dev311", "language": "python", "name": "python3" }, @@ -477,7 +724,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.11.13" }, "nbsphinx": { "execute": "never" diff --git a/src/ess/polarization/correction.py b/src/ess/polarization/correction.py index 83528f0..3c1c5fd 100644 --- a/src/ess/polarization/correction.py +++ b/src/ess/polarization/correction.py @@ -103,6 +103,9 @@ def compute_polarizing_element_correction( : Correction matrix coefficients. """ + if isinstance(channel, sc.Variable | sc.DataArray) and channel.bins is not None: + channel = channel.bins + t_plus = transmission.apply(channel, 'plus') t_minus = transmission.apply(channel, 'minus') t_minus *= -1 diff --git a/src/ess/polarization/he3.py b/src/ess/polarization/he3.py index 47153be..42483e1 100644 --- a/src/ess/polarization/he3.py +++ b/src/ess/polarization/he3.py @@ -99,10 +99,7 @@ def __call__(self, wavelength: sc.Variable) -> sc.Variable: scale = broadcast_with_upper_bound_variances( self.opacity0, prototype=wavelength ) - return sc.DataArray( - (scale * wavelength).to(unit='', copy=False), - coords={'wavelength': wavelength}, - ) + return (scale * wavelength).to(unit='', copy=False) def he3_opacity_from_cell_params( @@ -192,7 +189,7 @@ def T1(self) -> sc.Variable: return self._T1 def __call__(self, time: sc.Variable) -> sc.Variable: - return sc.DataArray(self.C * sc.exp(-time / self.T1), coords={'time': time}) + return self.C * sc.exp(-time / self.T1) @dataclass @@ -214,7 +211,7 @@ def __call__( polarization *= 
-plus_minus return self.transmission_empty_glass * sc.exp(-opacity * (1.0 + polarization)) - def apply(self, data: sc.DataArray, plus_minus: PlusMinus) -> sc.DataArray: + def apply(self, data: sc.DataArray, plus_minus: PlusMinus) -> sc.Variable: return self( time=data.coords['time'], wavelength=data.coords['wavelength'], diff --git a/src/ess/polarization/supermirror.py b/src/ess/polarization/supermirror.py index d983ecd..1276117 100644 --- a/src/ess/polarization/supermirror.py +++ b/src/ess/polarization/supermirror.py @@ -17,7 +17,7 @@ class SupermirrorEfficiencyFunction(Generic[PolarizingElement], ABC): """Base class for supermirror efficiency functions""" @abstractmethod - def __call__(self, *, wavelength: sc.Variable) -> sc.DataArray: + def __call__(self, *, wavelength: sc.Variable) -> sc.Variable: """Return the efficiency of a supermirror for a given wavelength""" @@ -44,7 +44,7 @@ class SecondDegreePolynomialEfficiency( b: sc.Variable c: sc.Variable - def __call__(self, *, wavelength: sc.Variable) -> sc.DataArray: + def __call__(self, *, wavelength: sc.Variable) -> sc.Variable: """Return the efficiency of a supermirror for a given wavelength""" return ( (self.a * wavelength**2).to(unit='', copy=False) @@ -71,9 +71,9 @@ def __post_init__(self): table = self.table if self.table.variances is None else sc.values(self.table) self._lut = sc.lookup(table, 'wavelength') - def __call__(self, *, wavelength: sc.Variable) -> sc.DataArray: + def __call__(self, *, wavelength: sc.Variable) -> sc.Variable: """Return the efficiency of a supermirror for a given wavelength""" - return sc.DataArray(self._lut(wavelength), coords={'wavelength': wavelength}) + return self._lut(wavelength) @classmethod def from_file( @@ -107,7 +107,7 @@ class SupermirrorTransmissionFunction(TransmissionFunction[PolarizingElement]): def __call__( self, *, wavelength: sc.Variable, plus_minus: PlusMinus - ) -> sc.DataArray: + ) -> sc.Variable: """Return the transmission fraction for a given 
wavelength""" efficiency = self.efficiency_function(wavelength=wavelength) if plus_minus == 'plus': @@ -115,7 +115,7 @@ def __call__( else: return 0.5 * (1 - efficiency) - def apply(self, data: sc.DataArray, plus_minus: PlusMinus) -> sc.DataArray: + def apply(self, data: sc.DataArray, plus_minus: PlusMinus) -> sc.Variable: """Apply the transmission function to a data array""" return self(wavelength=data.coords['wavelength'], plus_minus=plus_minus) diff --git a/src/ess/polarization/types.py b/src/ess/polarization/types.py index 16a28c8..55ec9a0 100644 --- a/src/ess/polarization/types.py +++ b/src/ess/polarization/types.py @@ -32,7 +32,7 @@ class TransmissionFunction(Generic[PolarizingElement], ABC): """Wavelength- and time-dependent transmission for a given cell.""" @abstractmethod - def apply(self, data: sc.DataArray, plus_minus: PlusMinus) -> sc.DataArray: ... + def apply(self, data: sc.DataArray, plus_minus: PlusMinus) -> sc.Variable: ... @dataclass @@ -85,6 +85,12 @@ class PolarizationCorrectedData(Generic[PolarizerSpin, AnalyzerSpin]): downup: sc.DataArray downdown: sc.DataArray + def __post_init__(self): + self.upup.name = '(up, up)' + self.updown.name = '(up, down)' + self.downup.name = '(down, up)' + self.downdown.name = '(down, down)' + """The sum of polarization corrected data from all flipper state channels.""" TotalPolarizationCorrectedData = NewType( diff --git a/src/ess/polarization/zoom.py b/src/ess/polarization/zoom.py index 4fd3933..384fc5f 100644 --- a/src/ess/polarization/zoom.py +++ b/src/ess/polarization/zoom.py @@ -1,22 +1,15 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright (c) 2023 Scipp contributors (https://github.com/scipp) -import threading from collections.abc import Sequence -from dataclasses import dataclass -from typing import Generic -import mantid.api as _mantid_api import sciline as sl import scipp as sc -from mantid import simpleapi as _mantid_simpleapi from scippnexus import NXsource import ess.isissans as isis from 
ess.isissans.io import LoadedFileContents -from ess.isissans.mantidio import DataWorkspace, Period from ess.reduce.nexus.types import Position from ess.sans.types import ( - EmptyBeamRun, Filename, Incident, MonitorType, @@ -26,7 +19,6 @@ SampleRun, Transmission, TransmissionRun, - UncertaintyBroadcastMode, ) # In this case the "sample" is the analyzer cell, of which we want to measure @@ -34,45 +26,6 @@ sample_run_type = RunType -def load_histogrammed_run( - filename: Filename[sample_run_type], period: Period -) -> DataWorkspace[sample_run_type]: - """Load a non-event-data ISIS file""" - # Loading many small files with Mantid is, for some reason, very slow when using - # the default number of threads in the Dask threaded scheduler (1 thread worked - # best, 2 is a bit slower but still fast). We can either limit that thread count, - # or add a lock here, which is more specific. - with load_histogrammed_run.lock: - loaded = _mantid_simpleapi.Load(Filename=str(filename), StoreInADS=False) - if isinstance(loaded, _mantid_api.Workspace): - # A single workspace - data_ws = loaded - if isinstance(data_ws, _mantid_api.WorkspaceGroup): - if period is None: - raise ValueError( - f'Needs {Period} to be set to know what ' - 'section of the event data to load' - ) - data_ws = data_ws.getItem(period) - else: - # Separate data and monitor workspaces - data_ws = loaded.OutputWorkspace - if isinstance(data_ws, _mantid_api.WorkspaceGroup): - if period is None: - raise ValueError( - f'Needs {Period} to be set to know what ' - 'section of the event data to load' - ) - data_ws = data_ws.getItem(period) - data_ws.setMonitorWorkspace(loaded.MonitorWorkspace.getItem(period)) - else: - data_ws.setMonitorWorkspace(loaded.MonitorWorkspace) - return DataWorkspace[sample_run_type](data_ws) - - -load_histogrammed_run.lock = threading.Lock() - - def _get_time(dg: sc.DataGroup) -> sc.Variable: start = sc.datetime(dg['run_start'].value) end = sc.datetime(dg['run_end'].value) @@ -100,46 +53,24 @@ 
def _get_unique_position(*positions: sc.DataArray) -> sc.DataArray: return unique -@dataclass -class MonitorSpectrumNumber(Generic[MonitorType]): - value: int - - -def get_monitor_data( - dg: LoadedFileContents[RunType], nexus_name: NeXusMonitorName[MonitorType] +def get_monitor_data_no_variances( + dg: LoadedFileContents[RunType], + nexus_name: NeXusMonitorName[MonitorType], + spectrum_number: isis.MonitorSpectrumNumber[MonitorType], ) -> NeXusComponent[MonitorType, RunType]: """ Same as :py:func:`ess.isissans.get_monitor_data` but dropping variances. - - Dropping variances is a workaround required since ESSsans does not handle - variance broadcasting when combining monitors. In our case some of the monitors - are time-dependent, so this is required for now. """ - # See https://github.com/scipp/sciline/issues/52 why copy needed - mon = dg['monitors'][nexus_name]['data'].copy() - return NeXusComponent[MonitorType, RunType]( - sc.DataGroup(data=sc.values(mon), position=mon.coords['position']) + monitor = isis.general.get_monitor_data( + dg, nexus_name=nexus_name, spectrum_number=spectrum_number ) - - -def get_monitor_data_from_empty_beam_run( - dg: LoadedFileContents[EmptyBeamRun], - spectrum_number: MonitorSpectrumNumber[MonitorType], -) -> NeXusComponent[MonitorType, EmptyBeamRun]: - """ - Extract incident or transmission monitor from ZOOM empty beam run - - The files in this case do not contain detector data, only monitor data. Mantid - stores this as a Workspace2D, where each spectrum corresponds to a monitor. 
- """ - # Note we index with a scipp.Variable, i.e., by the spectrum number used at ISIS - monitor = sc.values(dg["data"]["spectrum", sc.index(spectrum_number.value)]).copy() - return sc.DataGroup(data=monitor, position=monitor.coords['position']) + monitor['data'] = sc.values(monitor['data']) + return NeXusComponent[MonitorType, RunType](monitor) def get_monitor_data_from_transmission_run( dg: LoadedFileContents[TransmissionRun[RunType]], - spectrum_number: MonitorSpectrumNumber[MonitorType], + spectrum_number: isis.MonitorSpectrumNumber[MonitorType], ) -> NeXusComponent[MonitorType, TransmissionRun[RunType]]: """ Extract incident or transmission monitor from ZOOM direct-beam run @@ -172,10 +103,8 @@ def ZoomTransmissionFractionWorkflow(runs: Sequence[str]) -> sl.Pipeline: List of filenames of the runs to use for the transmission fraction. """ workflow = isis.zoom.ZoomWorkflow() - workflow.insert(get_monitor_data) - workflow.insert(get_monitor_data_from_empty_beam_run) + workflow.insert(get_monitor_data_no_variances) workflow.insert(get_monitor_data_from_transmission_run) - workflow.insert(load_histogrammed_run) mapped = workflow.map({Filename[TransmissionRun[SampleRun]]: runs}) for mon_type in (Incident, Transmission): @@ -186,16 +115,4 @@ def ZoomTransmissionFractionWorkflow(runs: Sequence[str]) -> sl.Pipeline: Position[NXsource, TransmissionRun[SampleRun]] ].reduce(func=_get_unique_position) - # We are dealing with two different types of files, and monitors are identified - # differently in each case, so there is some duplication here. 
- workflow[MonitorSpectrumNumber[Incident]] = MonitorSpectrumNumber[Incident](3) - workflow[MonitorSpectrumNumber[Transmission]] = MonitorSpectrumNumber[Transmission]( - 4 - ) - workflow[NeXusMonitorName[Incident]] = NeXusMonitorName[Incident]("monitor3") - workflow[NeXusMonitorName[Transmission]] = NeXusMonitorName[Transmission]( - "monitor4" - ) - workflow[UncertaintyBroadcastMode] = UncertaintyBroadcastMode.upper_bound - return workflow diff --git a/tests/correction_test.py b/tests/correction_test.py index c7c9562..fe356d3 100644 --- a/tests/correction_test.py +++ b/tests/correction_test.py @@ -68,6 +68,30 @@ def test_compute_polarizing_element_correction() -> None: assert_allclose(off_diag, -transmission_minus / denom) +def test_compute_polarizing_element_correction_binned_data() -> None: + time = sc.linspace('event', 1, 10, 10, unit='') + wavelength = sc.linspace('event', 0.1, 1, 10, unit='') + events = sc.DataArray( + sc.arange('event', 10), + coords={'time': time, 'wavelength': wavelength}, + ) + binned = sc.bins( + data=events, dim='event', begin=sc.array(dims=['Q'], values=[0, 3], unit=None) + ) + transmission = SimpleTransmissionFunction() + + result = compute_polarizing_element_correction( + channel=binned, transmission=transmission + ) + diag = result.diag + off_diag = result.off_diag + transmission_plus = transmission(time, wavelength, 'plus') + transmission_minus = transmission(time, wavelength, 'minus') + denom = transmission_plus**2 - transmission_minus**2 + assert_allclose(diag.bins.concat().value, transmission_plus / denom) + assert_allclose(off_diag.bins.concat().value, -transmission_minus / denom) + + class FakeTransmissionFunction: def __init__(self, coeffs: np.ndarray) -> None: self.coeffs = coeffs diff --git a/tests/he3/opacity_test.py b/tests/he3/opacity_test.py index 8ddaaab..3f4af08 100644 --- a/tests/he3/opacity_test.py +++ b/tests/he3/opacity_test.py @@ -18,7 +18,7 @@ def test_opacity_from_cell_params() -> None: pressure=pressure, 
length=length, temperature=temperature ) opacity_function = he3.he3_opacity_function_from_cell_opacity(opacity0) - opacity = opacity_function(wavelength).data + opacity = opacity_function(wavelength) assert_identical(2 * opacity['pressure', 0], opacity['pressure', 1]) assert_identical(2 * opacity['cell_length', 0], opacity['cell_length', 1]) assert_identical(2 * opacity['wavelength', 0], opacity['wavelength', 1]) @@ -38,7 +38,7 @@ def test_opacity_from_cell_params_reproduces_literature_value() -> None: pressure=pressure, length=length, temperature=temperature ) opacity_function = he3.he3_opacity_function_from_cell_opacity(opacity0) - opacity = opacity_function(wavelength).data + opacity = opacity_function(wavelength) assert sc.isclose(opacity, sc.scalar(0.0733, unit=''), rtol=sc.scalar(1e-3)) @@ -64,7 +64,7 @@ def test_opacity_from_beam_data() -> None: transmission_fraction=transmission, opacity0_initial_guess=opacity0 * 1.23, # starting guess imperfect ) - opacity = opacity_function(wavelength).data + opacity = opacity_function(wavelength) assert sc.isclose( opacity_function.opacity0, opacity0.to(unit=opacity_function.opacity0.unit) ) diff --git a/tests/he3/polarization_test.py b/tests/he3/polarization_test.py index f19aaba..49a9312 100644 --- a/tests/he3/polarization_test.py +++ b/tests/he3/polarization_test.py @@ -18,10 +18,16 @@ def test_incoming_unpolarized_reproduces_input_params_within_errors() -> None: transmission_empty_glass = sc.scalar(0.9) opacity = opacity_function(wavelength) polarization = polarization_function(time) - transmission = he3.transmission_incoming_unpolarized( - transmission_empty_glass=transmission_empty_glass, - opacity=opacity, - polarization=polarization, + transmission = sc.DataArray( + he3.transmission_incoming_unpolarized( + transmission_empty_glass=transmission_empty_glass, + opacity=opacity, + polarization=polarization, + ), + coords={ + 'time': time, + 'wavelength': wavelength, + }, ) result = 
he3.get_he3_transmission_incoming_unpolarized_from_fit_to_direct_beam( @@ -67,11 +73,23 @@ def test_incoming_polarized_reproduces_input_params_within_errors() -> None: polarization_function=polarization_function, ) # State switch at 456th time point, cut further below into 4 channels total. - plus = transmission_function( - time=time[:456], wavelength=wavelength, plus_minus='plus' + plus = sc.DataArray( + transmission_function( + time=time[:456], wavelength=wavelength, plus_minus='plus' + ), + coords={ + 'time': time[:456], + 'wavelength': wavelength, + }, ) - minus = transmission_function( - time=time[456:], wavelength=wavelength, plus_minus='minus' + minus = sc.DataArray( + transmission_function( + time=time[456:], wavelength=wavelength, plus_minus='minus' + ), + coords={ + 'time': time[456:], + 'wavelength': wavelength, + }, ) result = he3.get_he3_transmission_incoming_polarized_from_fit_to_direct_beam( @@ -121,11 +139,23 @@ def test_incoming_polarized_raises_if_plus_minus_coord_is_bad() -> None: polarization_function=polarization_function, ) # State switch at 456th time point, cut further below into 4 channels total. 
- plus = transmission_function( - time=time[:456], wavelength=wavelength, plus_minus='plus' + plus = sc.DataArray( + transmission_function( + time=time[:456], wavelength=wavelength, plus_minus='plus' + ), + coords={ + 'time': time[:456], + 'wavelength': wavelength, + }, ) - minus = transmission_function( - time=time[456:], wavelength=wavelength, plus_minus='minus' + minus = sc.DataArray( + transmission_function( + time=time[456:], wavelength=wavelength, plus_minus='minus' + ), + coords={ + 'time': time[456:], + 'wavelength': wavelength, + }, ) with pytest.raises(ValueError, match='plus-minus coordinate of plus channel'): diff --git a/tests/supermirror_test.py b/tests/supermirror_test.py index a9afd44..2ff4de3 100644 --- a/tests/supermirror_test.py +++ b/tests/supermirror_test.py @@ -78,10 +78,7 @@ def test_EfficiencyLookupTable_returns_expected_result(): ) ) x = sc.midpoints(sc.linspace('wavelength', 0, 1, 10, unit='angstrom')) - assert_allclose( - tab(wavelength=x), - sc.DataArray(sc.values(tab.table.data), coords={'wavelength': x}), - ) + assert_allclose(tab(wavelength=x), sc.values(tab.table.data)) def test_EfficiencyLookupTable_load_from_file():