diff --git a/Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ISISIndirectDiffractionReduction.py b/Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ISISIndirectDiffractionReduction.py new file mode 100644 index 000000000000..f5146bd436a7 --- /dev/null +++ b/Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ISISIndirectDiffractionReduction.py @@ -0,0 +1,212 @@ +#pylint: disable=no-init,too-many-instance-attributes + +from mantid.simpleapi import * +from mantid.api import * +from mantid.kernel import * +from mantid import config + +import os + + +class ISISIndirectDiffractionReduction(DataProcessorAlgorithm): + + _workspace_names = None + _chopped_data = None + _output_ws = None + _data_files = None + _instrument_name = None + _mode = None + _spectra_range = None + _grouping_method = None + _rebin_string = None + _ipf_filename = None + _sum_files = None + + + def category(self): + return 'Diffraction;PythonAlgorithms' + + + def summary(self): + return 'Performs a diffraction reduction for a set of raw run files for an ISIS indirect spectrometer' + + + def PyInit(self): + self.declareProperty(StringArrayProperty(name='InputFiles'), + doc='Comma separated list of input files.') + + self.declareProperty(name='SumFiles', defaultValue=False, + doc='Enabled to sum spectra from each input file.') + + self.declareProperty(name='Instrument', defaultValue='IRIS', + validator=StringListValidator(['IRIS', 'OSIRIS', 'TOSCA', 'VESUVIO']), + doc='Instrument used for run') + + self.declareProperty(name='Mode', defaultValue='diffspec', + validator=StringListValidator(['diffspec', 'diffonly']), + doc='Diffraction mode used') + + self.declareProperty(IntArrayProperty(name='SpectraRange'), + doc='Range of spectra to use.') + + self.declareProperty(name='RebinParam', defaultValue='', + doc='Rebin parameters.') + + self.declareProperty(name='GroupingPolicy', defaultValue='All', + validator=StringListValidator(['All', 'Individual', 'IPF']), + doc='Selects the type of detector grouping to be used.') + + self.declareProperty(WorkspaceGroupProperty('OutputWorkspace', '', + direction=Direction.Output), + doc='Group name for the result workspaces.') + + + def validateInputs(self): + """ + Checks for issues with user input. 
+ """ + issues = dict() + + # Validate input files + input_files = self.getProperty('InputFiles').value + if len(input_files) == 0: + issues['InputFiles'] = 'InputFiles must contain at least one filename' + + # Validate detector range + detector_range = self.getProperty('SpectraRange').value + if len(detector_range) != 2: + issues['SpectraRange'] = 'SpectraRange must be an array of 2 values only' + else: + if detector_range[0] > detector_range[1]: + issues['SpectraRange'] = 'SpectraRange must be in format [lower_index,upper_index]' + + return issues + + + def PyExec(self): + from IndirectReductionCommon import (load_files, + get_multi_frame_rebin, + identify_bad_detectors, + unwrap_monitor, + process_monitor_efficiency, + scale_monitor, + scale_detectors, + rebin_reduction, + group_spectra, + fold_chopped, + rename_reduction) + + self._setup() + + load_opts = dict() + if self._instrument_name == 'VESUVIO': + load_opts['Mode'] = 'FoilOut' + + self._workspace_names, self._chopped_data = load_files(self._data_files, + self._ipf_filename, + self._spectra_range[0], + self._spectra_range[1], + self._sum_files, + load_opts) + + for c_ws_name in self._workspace_names: + is_multi_frame = isinstance(mtd[c_ws_name], WorkspaceGroup) + + # Get list of workspaces + if is_multi_frame: + workspaces = mtd[c_ws_name].getNames() + else: + workspaces = [c_ws_name] + + # Process rebinning for framed data + rebin_string_2, num_bins = get_multi_frame_rebin(c_ws_name, + self._rebin_string) + + masked_detectors = identify_bad_detectors(workspaces[0]) + + # Process workspaces + for ws_name in workspaces: + monitor_ws_name = ws_name + '_mon' + + # Process monitor + if not unwrap_monitor(ws_name): + ConvertUnits(InputWorkspace=monitor_ws_name, + OutputWorkspace=monitor_ws_name, + Target='Wavelength', + EMode='Elastic') + + process_monitor_efficiency(ws_name) + scale_monitor(ws_name) + + # Scale detector data by monitor intensities + scale_detectors(ws_name, 'Elastic') + + # Remove the no longer needed monitor workspace + DeleteWorkspace(monitor_ws_name) + + # Convert to dSpacing + ConvertUnits(InputWorkspace=ws_name, + OutputWorkspace=ws_name, + Target='dSpacing', + EMode='Elastic') + + # Handle rebinning + rebin_reduction(ws_name, + self._rebin_string, + rebin_string_2, + num_bins) + + # Group spectra + group_spectra(ws_name, + masked_detectors, + self._grouping_method) + + if is_multi_frame: + fold_chopped(c_ws_name) + + # Rename output workspaces + output_workspace_names = [rename_reduction(ws_name, self._sum_files) for ws_name in self._workspace_names] + + # Group result workspaces + GroupWorkspaces(InputWorkspaces=output_workspace_names, + OutputWorkspace=self._output_ws) + + self.setProperty('OutputWorkspace', self._output_ws) + + + def _setup(self): + """ + Gets algorithm properties. 
+ """ + + self._output_ws = self.getPropertyValue('OutputWorkspace') + self._data_files = self.getProperty('InputFiles').value + self._instrument_name = self.getPropertyValue('Instrument') + self._mode = self.getPropertyValue('Mode') + self._spectra_range = self.getProperty('SpectraRange').value + self._rebin_string = self.getPropertyValue('RebinParam') + self._grouping_method = self.getPropertyValue('GroupingPolicy') + + if self._rebin_string == '': + self._rebin_string = None + + # Get the IPF filename + self._ipf_filename = self._instrument_name + '_diffraction_' + self._mode + '_Parameters.xml' + if not os.path.exists(self._ipf_filename): + self._ipf_filename = os.path.join(config['instrumentDefinition.directory'], self._ipf_filename) + logger.information('IPF filename is: %s' % (self._ipf_filename)) + + # Only enable sum files if we actually have more than one file + sum_files = self.getProperty('SumFiles').value + self._sum_files = False + + if sum_files: + num_raw_files = len(self._data_files) + if num_raw_files > 1: + self._sum_files = True + logger.information('Summing files enabled (have %d files)' % num_raw_files) + else: + logger.information('SumFiles options is ignored when only one file is provided') + + +AlgorithmFactory.subscribe(ISISIndirectDiffractionReduction) diff --git a/Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ISISIndirectEnergyTransfer.py b/Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ISISIndirectEnergyTransfer.py index f8f7840f5d5e..1524e3aa7b9e 100644 --- a/Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ISISIndirectEnergyTransfer.py +++ b/Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ISISIndirectEnergyTransfer.py @@ -2,10 +2,9 @@ from mantid.kernel import * from mantid.api import * from mantid.simpleapi import * -import mantid +from mantid import config + import os -import string -import numpy as np _str_or_none = lambda s: s if s != '' else None @@ -72,8 +71,26 @@ def PyInit(self): def PyExec(self): + from IndirectReductionCommon import (load_files, + get_multi_frame_rebin, + identify_bad_detectors, + unwrap_monitor, + process_monitor_efficiency, + scale_monitor, + scale_detectors, + rebin_reduction, + group_spectra, + fold_chopped, + rename_reduction, + save_reduction, + plot_reduction) + self._setup() - self._load_files() + self._workspace_names, self._chopped_data = load_files(self._data_files, + self._ipf_filename, + self._spectra_range[0], + self._spectra_range[1], + self._sum_files) for c_ws_name in self._workspace_names: is_multi_frame = isinstance(mtd[c_ws_name], WorkspaceGroup) @@ -85,34 +102,30 @@ def PyExec(self): workspaces = [c_ws_name] # Process rebinning for framed data - if self._rebin_string is not None and is_multi_frame: - rebin_string_comp = self._rebin_string.split(',') - if len(rebin_string_comp) >= 5: - rebin_string_2 = ','.join(rebin_string_comp[2:]) - else: - rebin_string_2 = self._rebin_string - - bin_counts = [mtd[ws].blocksize() for ws in mtd[c_ws_name].getNames()] - num_bins = np.amax(bin_counts) + rebin_string_2, num_bins = get_multi_frame_rebin(c_ws_name, + self._rebin_string) - masked_detectors = self._identify_bad_detectors(workspaces[0]) + masked_detectors = identify_bad_detectors(workspaces[0]) # Process workspaces for ws_name in workspaces: monitor_ws_name = ws_name + '_mon' # Process monitor - if not self._unwrap_monitor(ws_name): - ConvertUnits(InputWorkspace=monitor_ws_name, 
OutputWorkspace=monitor_ws_name, Target='Wavelength', EMode='Elastic') - - self._process_monitor_efficiency(ws_name) - self._scale_monitor(ws_name) + if not unwrap_monitor(ws_name): + ConvertUnits(InputWorkspace=monitor_ws_name, + OutputWorkspace=monitor_ws_name, + Target='Wavelength', + EMode='Elastic') + process_monitor_efficiency(ws_name) + scale_monitor(ws_name) # Do background removal if a range was provided if self._background_range is not None: ConvertToDistribution(Workspace=ws_name) - CalculateFlatBackground(InputWorkspace=ws_name, OutputWorkspace=ws_name, + CalculateFlatBackground(InputWorkspace=ws_name, + OutputWorkspace=ws_name, StartX=self._background_range[0], EndX=self._background_range[1], Mode='Mean') @@ -125,73 +138,78 @@ def PyExec(self): OutputWorkspace=ws_name) # Scale detector data by monitor intensities - ConvertUnits(InputWorkspace=ws_name, OutputWorkspace=ws_name, Target='Wavelength', EMode='Indirect') - RebinToWorkspace(WorkspaceToRebin=ws_name, WorkspaceToMatch=monitor_ws_name, OutputWorkspace=ws_name) - Divide(LHSWorkspace=ws_name, RHSWorkspace=monitor_ws_name, OutputWorkspace=ws_name) + scale_detectors(ws_name, 'Indirect') # Remove the no longer needed monitor workspace DeleteWorkspace(monitor_ws_name) # Convert to energy - ConvertUnits(InputWorkspace=ws_name, OutputWorkspace=ws_name, Target='DeltaE', EMode='Indirect') - CorrectKiKf(InputWorkspace=ws_name, OutputWorkspace=ws_name, EMode='Indirect') + ConvertUnits(InputWorkspace=ws_name, + OutputWorkspace=ws_name, + Target='DeltaE', + EMode='Indirect') + CorrectKiKf(InputWorkspace=ws_name, + OutputWorkspace=ws_name, + EMode='Indirect') # Handle rebinning - if self._rebin_string is not None: - if is_multi_frame: - # Mulit frame data - if mtd[ws_name].blocksize() == num_bins: - Rebin(InputWorkspace=ws_name, OutputWorkspace=ws_name, Params=self._rebin_string) - else: - Rebin(InputWorkspace=ws_name, OutputWorkspace=ws_name, Params=rebin_string_2) - else: - # Regular data - Rebin(InputWorkspace=ws_name, OutputWorkspace=ws_name, Params=self._rebin_string) - else: - try: - # If user does not want to rebin then just ensure uniform binning across spectra - RebinToWorkspace(WorkspaceToRebin=ws_name, WorkspaceToMatch=ws_name, OutputWorkspace=ws_name) - except RuntimeError: - logger.warning('Rebinning failed, will try to continue anyway.') + rebin_reduction(ws_name, + self._rebin_string, + rebin_string_2, + num_bins) # Detailed balance if self._detailed_balance is not None: corr_factor = 11.606 / (2 * self._detailed_balance) - ExponentialCorrection(InputWorkspaces=ws_name, OutputWorkspace=ws_name, - C0=1.0, C1=corr_factor, Operation='Multiply') + ExponentialCorrection(InputWorkspaces=ws_name, + OutputWorkspace=ws_name, + C0=1.0, + C1=corr_factor, + Operation='Multiply') # Scale if self._scale_factor != 1.0: - Scale(InputWorkspaces=ws_name, OutputWorkspace=ws_name, - Factor=self._scale_factor, Operation='Multiply') + Scale(InputWorkspaces=ws_name, + OutputWorkspace=ws_name, + Factor=self._scale_factor, + Operation='Multiply') # Group spectra - self._group_spectra(ws_name, masked_detectors) + group_spectra(ws_name, + masked_detectors, + self._grouping_method, + self._grouping_map_file, + self._grouping_ws) if self._fold_multiple_frames and is_multi_frame: - self._fold_chopped(c_ws_name) + fold_chopped(c_ws_name) # Convert to output units if needed if self._output_x_units != 'DeltaE': - ConvertUnits(InputWorkspace=c_ws_name, OutputWorkspace=c_ws_name, - EMode='Indirect', Target=self._output_x_units) + 
ConvertUnits(InputWorkspace=c_ws_name, + OutputWorkspace=c_ws_name, + EMode='Indirect', + Target=self._output_x_units) # Rename output workspaces - output_workspace_names = [self._rename_workspace(ws_name) for ws_name in self._workspace_names] + output_workspace_names = [rename_reduction(ws_name, self._sum_files) for ws_name in self._workspace_names] # Save result workspaces if self._save_formats is not None: - self._save(output_workspace_names) + save_reduction(output_workspace_names, + self._save_formats, + self._output_x_units) # Group result workspaces - GroupWorkspaces(InputWorkspaces=output_workspace_names, OutputWorkspace=self._output_ws) + GroupWorkspaces(InputWorkspaces=output_workspace_names, + OutputWorkspace=self._output_ws) self.setProperty('OutputWorkspace', self._output_ws) # Plot result workspaces if self._plot_type != 'None': for ws_name in mtd[self._output_ws].getNames(): - self._plot_workspace(ws_name) + plot_reduction(ws_name, self._plot_type) def validateInputs(self): @@ -300,541 +318,5 @@ def _setup(self): self._workspace_names = [] - def _load_files(self): - """ - Loads a set of files and extracts just the spectra we care about (i.e. detector range and monitor). - """ - - for filename in self._data_files: - # The filename without path and extension will be the workspace name - ws_name = os.path.splitext(os.path.basename(filename))[0] - logger.debug('Loading file %s as workspace %s' % (filename, ws_name)) - - Load(Filename=filename, OutputWorkspace=ws_name) - - # Load the instrument parameters - LoadParameterFile(Workspace=ws_name, Filename=self._ipf_filename) - - # Add the workspace to the list of workspaces - self._workspace_names.append(ws_name) - - # Get the spectrum number for the monitor - instrument = mtd[ws_name].getInstrument() - monitor_index = int(instrument.getNumberParameter('Workflow.Monitor1-SpectrumNumber')[0]) - logger.debug('Workspace %s monitor 1 spectrum number :%d' % (ws_name, monitor_index)) - - # Chop data if required - try: - chop_threshold = mtd[ws_name].getInstrument().getNumberParameter('Workflow.ChopDataIfGreaterThan')[0] - x_max = mtd[ws_name].readX(0)[-1] - self._chopped_data = x_max > chop_threshold - except IndexError: - self._chopped_data = False - logger.information('Workspace %s need data chop: %s' % (ws_name, str(self._chopped_data))) - - workspaces = [ws_name] - if self._chopped_data: - ChopData(InputWorkspace=ws_name, OutputWorkspace=ws_name, MonitorWorkspaceIndex=monitor_index, - IntegrationRangeLower=5000.0, IntegrationRangeUpper=10000.0, NChops=5) - workspaces = mtd[ws_name].getNames() - - for chop_ws_name in workspaces: - # Get the monitor spectrum - monitor_ws_name = chop_ws_name + '_mon' - ExtractSingleSpectrum(InputWorkspace=chop_ws_name, OutputWorkspace=monitor_ws_name, - WorkspaceIndex=monitor_index) - - # Crop to the detectors required - CropWorkspace(InputWorkspace=chop_ws_name, OutputWorkspace=chop_ws_name, - StartWorkspaceIndex=int(self._spectra_range[0]) - 1, - EndWorkspaceIndex=int(self._spectra_range[1]) - 1) - - logger.information('Loaded workspace names: %s' % (str(self._workspace_names))) - logger.information('Chopped data: %s' % (str(self._chopped_data))) - - # Sum files if needed - if self._sum_files: - if self._chopped_data: - self._sum_chopped_runs() - else: - self._sum_regular_runs() - - logger.information('Summed workspace names: %s' % (str(self._workspace_names))) - - - def _sum_regular_runs(self): - """ - Sum runs with single workspace data. 
- """ - - # Use the first workspace name as the result of summation - summed_detector_ws_name = self._workspace_names[0] - summed_monitor_ws_name = self._workspace_names[0] + '_mon' - - # Get a list of the run numbers for the original data - run_numbers = ','.join([str(mtd[ws_name].getRunNumber()) for ws_name in self._workspace_names]) - - # Generate lists of the detector and monitor workspaces - detector_workspaces = ','.join(self._workspace_names) - monitor_workspaces = ','.join([ws_name + '_mon' for ws_name in self._workspace_names]) - - # Merge the raw workspaces - MergeRuns(InputWorkspaces=detector_workspaces, OutputWorkspace=summed_detector_ws_name) - MergeRuns(InputWorkspaces=monitor_workspaces, OutputWorkspace=summed_monitor_ws_name) - - # Delete old workspaces - for idx in range(1, len(self._workspace_names)): - DeleteWorkspace(self._workspace_names[idx]) - DeleteWorkspace(self._workspace_names[idx] + '_mon') - - # Derive the scale factor based on number of merged workspaces - scale_factor = 1.0 / len(self._workspace_names) - logger.information('Scale factor for summed workspaces: %f' % scale_factor) - - # Scale the new detector and monitor workspaces - Scale(InputWorkspace=summed_detector_ws_name, OutputWorkspace=summed_detector_ws_name, - Factor=scale_factor) - Scale(InputWorkspace=summed_monitor_ws_name, OutputWorkspace=summed_monitor_ws_name, - Factor=scale_factor) - - # Add the list of run numbers to the result workspace as a sample log - AddSampleLog(Workspace=summed_detector_ws_name, LogName='multi_run_numbers', - LogType='String', LogText=run_numbers) - - # Only have the one workspace now - self._workspace_names = [summed_detector_ws_name] - - - def _sum_chopped_runs(self): - """ - Sum runs with chopped data. - """ - - try: - num_merges = len(mtd[self._workspace_names[0]].getNames()) - except: - raise RuntimeError('Not all runs have been chapped, cannot sum.') - - merges = list() - - # Generate a list of workspaces to be merged - for idx in range(0, num_merges): - merges.append({'detector':list(), 'monitor':list()}) - - for ws_name in self._workspace_names: - detector_ws_name = mtd[ws_name].getNames()[idx] - monitor_ws_name = detector_ws_name + '_mon' - - merges[idx]['detector'].append(detector_ws_name) - merges[idx]['monitor'].append(monitor_ws_name) - - for merge in merges: - # Merge the chopped run segments - MergeRuns(InputWorkspaces=','.join(merge['detector']), OutputWorkspace=merge['detector'][0]) - MergeRuns(InputWorkspaces=','.join(merge['monitor']), OutputWorkspace=merge['monitor'][0]) - - # Scale the merged runs - merge_size = len(merge['detector']) - factor = 1.0 / merge_size - Scale(InputWorkspace=merge['detector'][0], OutputWorkspace=merge['detector'][0], Factor=factor, Operation='Multiply') - Scale(InputWorkspace=merge['monitor'][0], OutputWorkspace=merge['monitor'][0], Factor=factor, Operation='Multiply') - - # Remove the old workspaces - for idx in range(1, merge_size): - DeleteWorkspace(merge['detector'][idx]) - DeleteWorkspace(merge['monitor'][idx]) - - # Only have the one workspace now - self._workspace_names = [self._workspace_names[0]] - - - def _identify_bad_detectors(self, ws_name): - """ - Identify detectors which should be masked - - @param ws_name Name of worksapce to use ot get masking detectors - """ - - instrument = mtd[ws_name].getInstrument() - - try: - masking_type = instrument.getStringParameter('Workflow.Masking')[0] - except IndexError: - masking_type = 'None' - - logger.information('Masking type: %s' % (masking_type)) - - masked_spec = 
list() - - if masking_type == 'IdentifyNoisyDetectors': - ws_mask = '__workspace_mask' - IdentifyNoisyDetectors(InputWorkspace=ws_name, OutputWorkspace=ws_mask) - - # Convert workspace to a list of spectra - num_spec = mtd[ws_mask].getNumberHistograms() - masked_spec = [spec for spec in range(0, num_spec) if mtd[ws_mask].readY(spec)[0] == 0.0] - - # Remove the temporary masking workspace - DeleteWorkspace(ws_mask) - - logger.debug('Masked specta for workspace %s: %s' % (ws_name, str(masked_spec))) - - return masked_spec - - - def _unwrap_monitor(self, ws_name): - """ - Unwrap monitor if required based on value of Workflow.UnwrapMonitor parameter - - @param ws_name Name of workspace - @return True if the monitor was unwrapped - """ - - monitor_ws_name = ws_name + '_mon' - instrument = mtd[monitor_ws_name].getInstrument() - - # Determine if the monitor should be unwrapped - try: - unwrap = instrument.getStringParameter('Workflow.UnwrapMonitor')[0] - - if unwrap == 'Always': - should_unwrap = True - elif unwrap == 'BaseOnTimeRegime': - mon_time = mtd[monitor_ws_name].readX(0)[0] - det_time = mtd[ws_name].readX(0)[0] - logger.notice(str(mon_time) + " " + str(det_time)) - should_unwrap = mon_time == det_time - else: - should_unwrap = False - - except IndexError: - should_unwrap = False - - logger.debug('Need to unwrap monitor for %s: %s' % (ws_name, str(should_unwrap))) - - if should_unwrap: - sample = instrument.getSample() - sample_to_source = sample.getPos() - instrument.getSource().getPos() - radius = mtd[ws_name].getDetector(0).getDistance(sample) - z_dist = sample_to_source.getZ() - l_ref = z_dist + radius - - logger.debug('For workspace %s: radius=%d, z_dist=%d, l_ref=%d' % - (ws_name, radius, z_dist, l_ref)) - - _, join = UnwrapMonitor(InputWorkspace=monitor_ws_name, - OutputWorkspace=monitor_ws_name, LRef=l_ref) - - RemoveBins(InputWorkspace=monitor_ws_name, OutputWorkspace=monitor_ws_name, - XMin=join - 0.001, XMax=join + 0.001, - Interpolation='Linear') - - try: - FFTSmooth(InputWorkspace=monitor_ws_name, OutputWorkspace=monitor_ws_name, WorkspaceIndex=0) - except ValueError: - raise ValueError('Uneven bin widths are not supported.') - - return should_unwrap - - - def _process_monitor_efficiency(self, ws_name): - """ - Process monitor efficiency for a given workspace. - - @param ws_name Name of workspace to process monitor for - """ - - monitor_ws_name = ws_name + '_mon' - instrument = mtd[ws_name].getInstrument() - - try: - area = instrument.getNumberParameter('Workflow.Monitor1-Area')[0] - thickness = instrument.getNumberParameter('Workflow.Monitor1-Thickness')[0] - attenuation = instrument.getNumberParameter('Workflow.Monitor1-Attenuation')[0] - except IndexError: - raise ValueError('Cannot get monitor details form parameter file') - - if area == -1 or thickness == -1 or attenuation == -1: - logger.information('For workspace %s, skipping monitor efficiency' % (ws_name)) - return - - OneMinusExponentialCor(InputWorkspace=monitor_ws_name, OutputWorkspace=monitor_ws_name, - C=attenuation * thickness, C1=area) - - - def _scale_monitor(self, ws_name): - """ - Scale monitor intensity by a factor given as the Workflow.MonitorScalingFactor parameter. 
- - @param ws_name Name of workspace to process monitor for - """ - - monitor_ws_name = ws_name + '_mon' - instrument = mtd[ws_name].getInstrument() - - try: - scale_factor = instrument.getNumberParameter('Workflow.Monitor1-ScalingFactor')[0] - except IndexError: - logger.information('No monitor scaling factor found for workspace %s' % ws_name) - return - - if scale_factor != 1.0: - Scale(InputWorkspace=monitor_ws_name, OutputWorkspace=monitor_ws_name, - Factor=1.0 / scale_factor, Operation='Multiply') - - - def _group_spectra(self, ws_name, masked_detectors): - """ - Groups spectra in a given workspace according to the Workflow.GroupingMethod and - Workflow.GroupingFile parameters and GrpupingPolicy property. - - @param ws_name Name of workspace to group spectra of - @param masked_detectors List of spectra numbers to mask - """ - - instrument = mtd[ws_name].getInstrument() - - # If grouping as per he IPF is desired - if self._grouping_method == 'IPF': - # Get the grouping method from the parameter file - try: - grouping_method = instrument.getStringParameter('Workflow.GroupingMethod')[0] - except IndexError: - grouping_method = 'Individual' - - else: - # Otherwise use the value of GroupingPolicy - grouping_method = self._grouping_method - - logger.information('Grouping method for workspace %s is %s' % (ws_name, grouping_method)) - - if grouping_method == 'Individual': - # Nothing to do here - return - - elif grouping_method == 'All': - # Get a list of all spectra minus those which are masked - num_spec = mtd[ws_name].getNumberHistograms() - spectra_list = [spec for spec in range(0, num_spec) if spec not in masked_detectors] - - # Apply the grouping - GroupDetectors(InputWorkspace=ws_name, OutputWorkspace=ws_name, Behaviour='Average', - WorkspaceIndexList=spectra_list) - - elif grouping_method == 'File': - # Get the filename for the grouping file - if self._grouping_map_file is not None: - grouping_file = self._grouping_map_file - else: - try: - grouping_file = instrument.getStringParameter('Workflow.GroupingFile')[0] - except IndexError: - raise RuntimeError('Cannot get grouping file from properties or IPF.') - - # If the file is not found assume it is in the grouping files directory - if not os.path.isfile(grouping_file): - grouping_file = os.path.join(config.getString('groupingFiles.directory'), grouping_file) - - # If it is still not found just give up - if not os.path.isfile(grouping_file): - raise RuntimeError('Cannot find grouping file: %s' % (grouping_file)) - - # Mask detectors if required - if len(masked_detectors) > 0: - MaskDetectors(Workspace=ws_name, WorkspaceIndexList=masked_detectors) - - # Apply the grouping - GroupDetectors(InputWorkspace=ws_name, OutputWorkspace=ws_name, Behaviour='Average', - MapFile=grouping_file) - - elif grouping_method == 'Workspace': - # Apply the grouping - GroupDetectors(InputWorkspace=ws_name, OutputWorkspace=ws_name, Behaviour='Average', - CopyGroupingFromWorkspace=self._grouping_ws) - - else: - raise RuntimeError('Invalid grouping method %s for workspace %s' % (grouping_method, ws_name)) - - - def _fold_chopped(self, ws_name): - """ - Folds multiple frames of a data set into one workspace. 
- - @param ws_name Name of the group to fold - """ - - workspaces = mtd[ws_name].getNames() - merged_ws = ws_name + '_merged' - MergeRuns(InputWorkspaces=','.join(workspaces), OutputWorkspace=merged_ws) - - scaling_ws = '__scaling_ws' - unit = mtd[ws_name].getItem(0).getAxis(0).getUnit().unitID() - - ranges = [] - for ws in mtd[ws_name].getNames(): - x_min = mtd[ws].dataX(0)[0] - x_max = mtd[ws].dataX(0)[-1] - ranges.append((x_min, x_max)) - DeleteWorkspace(Workspace=ws) - - data_x = mtd[merged_ws].readX(0) - data_y = [] - data_e = [] - - for i in range(0, mtd[merged_ws].blocksize()): - y_val = 0.0 - for rng in ranges: - if data_x[i] >= rng[0] and data_x[i] <= rng[1]: - y_val += 1.0 - - data_y.append(y_val) - data_e.append(0.0) - - CreateWorkspace(OutputWorkspace=scaling_ws, DataX=data_x, DataY=data_y, DataE=data_e, UnitX=unit) - - Divide(LHSWorkspace=merged_ws, RHSWorkspace=scaling_ws, OutputWorkspace=ws_name) - DeleteWorkspace(Workspace=merged_ws) - DeleteWorkspace(Workspace=scaling_ws) - - def _rename_workspace(self, ws_name): - """ - Renames a worksapce according to the naming policy in the Workflow.NamingConvention parameter. - - @param ws_name Name of workspace - @return New name of workspace - """ - - is_multi_frame = isinstance(mtd[ws_name], WorkspaceGroup) - - # Get the instrument - if is_multi_frame: - instrument = mtd[ws_name].getItem(0).getInstrument() - else: - instrument = mtd[ws_name].getInstrument() - - # Get the naming convention parameter form the parameter file - try: - convention = instrument.getStringParameter('Workflow.NamingConvention')[0] - except IndexError: - # Defualt to run title if naming convention parameter not set - convention = 'RunTitle' - logger.information('Naming convention for workspace %s is %s' % (ws_name, convention)) - - # Get run number - if is_multi_frame: - run_number = mtd[ws_name].getItem(0).getRun()['run_number'].value - else: - run_number = mtd[ws_name].getRun()['run_number'].value - logger.information('Run number for workspace %s is %s' % (ws_name, run_number)) - - inst_name = instrument.getName() - for facility in config.getFacilities(): - try: - short_inst_name = facility.instrument(inst_name).shortName() - break - except RuntimeError: - pass - logger.information('Short name for instrument %s is %s' % (inst_name, short_inst_name)) - - # Get run title - if is_multi_frame: - run_title = mtd[ws_name].getItem(0).getRun()['run_title'].value.strip() - else: - run_title = mtd[ws_name].getRun()['run_title'].value.strip() - logger.information('Run title for workspace %s is %s' % (ws_name, run_title)) - - if self._sum_files: - multi_run_marker = '_multi' - else: - multi_run_marker = '' - - if convention == 'None': - new_name = ws_name - - elif convention == 'RunTitle': - valid = "-_.() %s%s" % (string.ascii_letters, string.digits) - formatted_title = ''.join([c for c in run_title if c in valid]) - new_name = '%s%s%s-%s' % (short_inst_name.lower(), run_number, multi_run_marker, formatted_title) - - elif convention == 'AnalyserReflection': - analyser = instrument.getStringParameter('analyser')[0] - reflection = instrument.getStringParameter('reflection')[0] - new_name = '%s%s%s_%s%s_red' % (short_inst_name.upper(), run_number, multi_run_marker, - analyser, reflection) - - else: - raise RuntimeError('No valid naming convention for workspace %s' % ws_name) - - logger.information('New name for %s workspace: %s' % (ws_name, new_name)) - - RenameWorkspace(InputWorkspace=ws_name, OutputWorkspace=new_name) - return new_name - - - def _plot_workspace(self, 
ws_name): - """ - Plot a given workspace based on the Plot property. - - @param ws_name Name of workspace to plot - """ - - if self._plot_type == 'Spectra' or self._plot_type == 'Both': - from mantidplot import plotSpectrum - num_spectra = mtd[ws_name].getNumberHistograms() - try: - plotSpectrum(ws_name, range(0, num_spectra)) - except RuntimeError: - logger.notice('Spectrum plotting canceled by user') - - can_plot_contour = mtd[ws_name].getNumberHistograms() > 1 - if (self._plot_type == 'Contour' or self._plot_type == 'Both') and can_plot_contour: - from mantidplot import importMatrixWorkspace - plot_workspace = importMatrixWorkspace(ws_name) - plot_workspace.plotGraph2D() - - - def _save(self, worksspace_names): - """ - Saves the workspaces to the default save directory. - - @param worksspace_names List of workspace names to save - """ - - for ws_name in worksspace_names: - if 'spe' in self._save_formats: - SaveSPE(InputWorkspace=ws_name, Filename=ws_name + '.spe') - - if 'nxs' in self._save_formats: - SaveNexusProcessed(InputWorkspace=ws_name, Filename=ws_name + '.nxs') - - if 'nxspe' in self._save_formats: - SaveNXSPE(InputWorkspace=ws_name, Filename=ws_name + '.nxspe') - - if 'ascii' in self._save_formats: - # Version 1 of SaveASCII produces output that works better with excel/origin - # For some reason this has to be done with an algorithm object, using the function - # wrapper with Version did not change the version that was run - saveAsciiAlg = mantid.api.AlgorithmManager.createUnmanaged('SaveAscii', 1) - saveAsciiAlg.initialize() - saveAsciiAlg.setProperty('InputWorkspace', ws_name) - saveAsciiAlg.setProperty('Filename', ws_name + '.dat') - saveAsciiAlg.execute() - - if 'aclimax' in self._save_formats: - if self._output_x_units == 'DeltaE_inWavenumber': - bins = '24, -0.005, 4000' #cm-1 - else: - bins = '3, -0.005, 500' #meV - Rebin(InputWorkspace=ws_name,OutputWorkspace= ws_name + '_aclimax_save_temp', Params=bins) - SaveAscii(InputWorkspace=ws_name + '_aclimax_save_temp', Filename=ws_name + '_aclimax.dat', Separator='Tab') - DeleteWorkspace(Workspace=ws_name + '_aclimax_save_temp') - - if 'davegrp' in self._save_formats: - ConvertSpectrumAxis(InputWorkspace=ws_name, OutputWorkspace=ws_name + '_davegrp_save_temp', - Target='ElasticQ', EMode='Indirect') - SaveDaveGrp(InputWorkspace=ws_name + '_davegrp_save_temp', Filename=ws_name + '.grp') - DeleteWorkspace(Workspace=ws_name + '_davegrp_save_temp') - - # Register algorithm with Mantid AlgorithmFactory.subscribe(ISISIndirectEnergyTransfer) diff --git a/Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/MSGDiffractionReduction.py b/Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/MSGDiffractionReduction.py deleted file mode 100644 index ca4277dad752..000000000000 --- a/Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/MSGDiffractionReduction.py +++ /dev/null @@ -1,120 +0,0 @@ -#pylint: disable=no-init -from mantid.simpleapi import * -from mantid.api import * -from mantid.kernel import * -from mantid import config - -class MSGDiffractionReduction(PythonAlgorithm): - - def category(self): - return 'Diffraction;PythonAlgorithms' - - - def summary(self): - return 'Calculates the scattering & transmission for Indirect Geometry spectrometers.' 
- - - def PyInit(self): - self.declareProperty(StringArrayProperty(name='InputFiles'), - doc='Comma separated list of input files.') - - self.declareProperty(name='SumFiles', defaultValue=False, - doc='Enabled to sum spectra from each input file.') - - self.declareProperty(name='IndividualGrouping', defaultValue=False, - doc='Do not group results into a single spectra.') - - self.declareProperty(name='Instrument', defaultValue='IRIS', - validator=StringListValidator(['IRIS', 'OSIRIS', 'TOSCA', 'VESUVIO']), - doc='Instrument used for run') - - self.declareProperty(name='Mode', defaultValue='diffspec', - validator=StringListValidator(['diffspec', 'diffonly']), - doc='Diffraction mode used') - - self.declareProperty(IntArrayProperty(name='DetectorRange'), - doc='Range of detectors to use.') - - self.declareProperty(name='RebinParam', defaultValue='', - doc='Rebin parameters.') - - self.declareProperty(WorkspaceGroupProperty('OutputWorkspace', '',\ - direction=Direction.Output), - doc='Group name for the result workspaces.') - - self.declareProperty(StringArrayProperty(name='SaveFormats'), - doc='Save formats to save output in.') - - - def validateInputs(self): - """ - Checks for issues with user input. - """ - issues = dict() - - # Validate input files - input_files = self.getProperty('InputFiles').value - if len(input_files) == 0: - issues['InputFiles'] = 'InputFiles must contain at least one filename' - - # Validate detector range - detector_range = self.getProperty('DetectorRange').value - if len(detector_range) != 2: - issues['DetectorRange'] = 'DetectorRange must be an array of 2 values only' - else: - if detector_range[0] > detector_range[1]: - issues['DetectorRange'] = 'DetectorRange must be in format [lower_index,upper_index]' - - # Validate save formats - save_formats = self.getProperty('SaveFormats').value - valid_formats = ['gss', 'nxs', 'ascii'] - for s_format in save_formats: - if s_format not in valid_formats: - issues['SaveFormats'] = 'Contains invalid save formats' - break - - return issues - - - def PyExec(self): - from IndirectDiffractionReduction import MSGDiffractionReducer - - input_files = self.getProperty('InputFiles').value - sum_files = self.getProperty('SumFiles').value - individual_grouping = self.getProperty('IndividualGrouping').value - instrument_name = self.getPropertyValue('Instrument') - mode = self.getPropertyValue('Mode') - detector_range = self.getProperty('DetectorRange').value - rebin_string = self.getPropertyValue('RebinParam') - output_ws_group = self.getPropertyValue('OutputWorkspace') - save_formats = self.getProperty('SaveFormats').value - - ipf_filename = instrument_name + '_diffraction_' + mode + '_Parameters.xml' - - reducer = MSGDiffractionReducer() - reducer.set_instrument_name(instrument_name) - reducer.set_detector_range(int(detector_range[0] - 1), int(detector_range[1] - 1)) - reducer.set_parameter_file(ipf_filename) - reducer.set_sum_files(sum_files) - reducer.set_save_formats(save_formats) - - if individual_grouping: - reducer.set_grouping_policy('Individual') - - for in_file in input_files: - reducer.append_data_file(in_file) - - if rebin_string != '': - reducer.set_rebin_string(rebin_string) - - if instrument_name == 'VESUVIO': - reducer.append_load_option('Mode', 'FoilOut') - - reducer.reduce() - - result_ws_list = reducer.get_result_workspaces() - GroupWorkspaces(InputWorkspaces=result_ws_list, OutputWorkspace=output_ws_group) - self.setProperty('OutputWorkspace', output_ws_group) - - -AlgorithmFactory.subscribe(MSGDiffractionReduction) 
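For anyone migrating scripts, the property mapping between the algorithm deleted above and its replacement is mechanical. The following is a minimal, hedged sketch of the before/after call, based only on the renames visible in this patch (DetectorRange becomes SpectraRange, IndividualGrouping becomes GroupingPolicy='Individual', and SaveFormats is dropped in favour of saving in the caller); the run file is the one used by the system tests below, and the old-call values in the comment are illustrative.

    # Sketch: an old MSGDiffractionReduction call rewritten for the
    # replacement algorithm. File and workspace names are illustrative.
    from mantid.simpleapi import ISISIndirectDiffractionReduction
    from mantid import mtd

    # Previously:
    #   MSGDiffractionReduction(InputFiles='IRS21360.raw', Instrument='IRIS',
    #                           Mode='diffspec', DetectorRange=[105, 112],
    #                           IndividualGrouping=True, SaveFormats=['nxs'],
    #                           OutputWorkspace='IndirectDiffraction_Workspaces')
    ISISIndirectDiffractionReduction(InputFiles='IRS21360.raw',
                                     Instrument='IRIS',
                                     Mode='diffspec',
                                     SpectraRange=[105, 112],      # was DetectorRange
                                     GroupingPolicy='Individual',  # was IndividualGrouping=True
                                     OutputWorkspace='IndirectDiffraction_Workspaces')

    # SaveFormats no longer exists on the algorithm; callers (e.g. the GUI's
    # new saveGenericReductions() below) now save the grouped output themselves.
    red_ws = mtd['IndirectDiffraction_Workspaces'].getItem(0)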
diff --git a/Code/Mantid/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/IndirectDiffractionReduction.h b/Code/Mantid/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/IndirectDiffractionReduction.h index a85d80e6f3f3..684be8368bd0 100644 --- a/Code/Mantid/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/IndirectDiffractionReduction.h +++ b/Code/Mantid/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/IndirectDiffractionReduction.h @@ -54,6 +54,7 @@ public slots: std::string reflection = ""); void runGenericReduction(QString instName, QString mode); + void saveGenericReductions(); void runOSIRISdiffonlyReduction(); private: diff --git a/Code/Mantid/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp b/Code/Mantid/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp index e299035cf7a8..388599eddfba 100644 --- a/Code/Mantid/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp +++ b/Code/Mantid/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp @@ -53,12 +53,14 @@ IndirectDiffractionReduction::IndirectDiffractionReduction(QWidget *parent) : connect(m_batchAlgoRunner, SIGNAL(batchComplete(bool)), this, SLOT(plotResults(bool))); } + ///Destructor IndirectDiffractionReduction::~IndirectDiffractionReduction() { saveSettings(); } + /** * Sets up UI components and Qt signal/slot connections. */ @@ -130,6 +132,7 @@ void IndirectDiffractionReduction::demonRun() } } + /** * Handles plotting result spectra from algorithm chains. * @@ -144,7 +147,7 @@ void IndirectDiffractionReduction::plotResults(bool error) return; } - // Ungroup the output workspace if MSGDiffractionReduction was used + // Ungroup the output workspace if generic reducer was used if(AnalysisDataService::Instance().doesExist("IndirectDiffraction_Workspaces")) { WorkspaceGroup_sptr diffResultsGroup = AnalysisDataService::Instance().retrieveWS("IndirectDiffraction_Workspaces"); @@ -154,6 +157,8 @@ void IndirectDiffractionReduction::plotResults(bool error) diffResultsGroup->removeAll(); AnalysisDataService::Instance().remove("IndirectDiffraction_Workspaces"); + + saveGenericReductions(); } QString instName = m_uiForm.iicInstrumentConfiguration->getInstrumentName(); @@ -178,6 +183,66 @@ void IndirectDiffractionReduction::plotResults(bool error) runPythonCode(pyInput); } + +/** + * Handles saving the reductions from the generic algorithm. 
+ */
+void IndirectDiffractionReduction::saveGenericReductions()
+{
+  for(auto it = m_plotWorkspaces.begin(); it != m_plotWorkspaces.end(); ++it)
+  {
+    std::string wsName = *it;
+
+    if(m_uiForm.ckGSS->isChecked())
+    {
+      std::string tofWsName = wsName + "_tof";
+
+      // Convert to TOF for GSS
+      IAlgorithm_sptr convertUnits = AlgorithmManager::Instance().create("ConvertUnits");
+      convertUnits->initialize();
+      convertUnits->setProperty("InputWorkspace", wsName);
+      convertUnits->setProperty("OutputWorkspace", tofWsName);
+      convertUnits->setProperty("Target", "TOF");
+      m_batchAlgoRunner->addAlgorithm(convertUnits);
+
+      BatchAlgorithmRunner::AlgorithmRuntimeProps inputFromConvUnitsProps;
+      inputFromConvUnitsProps["InputWorkspace"] = tofWsName;
+
+      // Save GSS
+      std::string gssFilename = wsName + ".gss";
+      IAlgorithm_sptr saveGSS = AlgorithmManager::Instance().create("SaveGSS");
+      saveGSS->initialize();
+      saveGSS->setProperty("Filename", gssFilename);
+      m_batchAlgoRunner->addAlgorithm(saveGSS, inputFromConvUnitsProps);
+    }
+
+    if(m_uiForm.ckNexus->isChecked())
+    {
+      // Save NeXus using SaveNexusProcessed
+      std::string nexusFilename = wsName + ".nxs";
+      IAlgorithm_sptr saveNexus = AlgorithmManager::Instance().create("SaveNexusProcessed");
+      saveNexus->initialize();
+      saveNexus->setProperty("InputWorkspace", wsName);
+      saveNexus->setProperty("Filename", nexusFilename);
+      m_batchAlgoRunner->addAlgorithm(saveNexus);
+    }
+
+    if(m_uiForm.ckAscii->isChecked())
+    {
+      // Save ASCII using SaveAscii version 1
+      std::string asciiFilename = wsName + ".dat";
+      IAlgorithm_sptr saveASCII = AlgorithmManager::Instance().create("SaveAscii", 1);
+      saveASCII->initialize();
+      saveASCII->setProperty("InputWorkspace", wsName);
+      saveASCII->setProperty("Filename", asciiFilename);
+      m_batchAlgoRunner->addAlgorithm(saveASCII);
+    }
+  }
+
+  m_batchAlgoRunner->executeBatchAsync();
+}
+
+
 /**
  * Runs a diffraction reduction for any instrument in any mode.
 *
@@ -195,15 +260,13 @@ void IndirectDiffractionReduction::runGenericReduction(QString instName, QString mode)
   if(!rebinStart.isEmpty() && !rebinWidth.isEmpty() && !rebinEnd.isEmpty())
     rebin = rebinStart + "," + rebinWidth + "," + rebinEnd;
 
-  bool individualGrouping = m_uiForm.ckIndividualGrouping->isChecked();
-
   // Get detector range
   std::vector<long> detRange;
   detRange.push_back(m_uiForm.set_leSpecMin->text().toLong());
   detRange.push_back(m_uiForm.set_leSpecMax->text().toLong());
 
-  // Get MSGDiffractionReduction algorithm instance
-  IAlgorithm_sptr msgDiffReduction = AlgorithmManager::Instance().create("MSGDiffractionReduction");
+  // Get generic reduction algorithm instance
+  IAlgorithm_sptr msgDiffReduction = AlgorithmManager::Instance().create("ISISIndirectDiffractionReduction");
   msgDiffReduction->initialize();
 
   // Get save formats
@@ -217,17 +280,20 @@ void IndirectDiffractionReduction::runGenericReduction(QString instName, QString mode)
   msgDiffReduction->setProperty("Mode", mode.toStdString());
   msgDiffReduction->setProperty("SumFiles", m_uiForm.dem_ckSumFiles->isChecked());
   msgDiffReduction->setProperty("InputFiles", m_uiForm.dem_rawFiles->getFilenames().join(",").toStdString());
-  msgDiffReduction->setProperty("DetectorRange", detRange);
+  msgDiffReduction->setProperty("SpectraRange", detRange);
   msgDiffReduction->setProperty("RebinParam", rebin.toStdString());
-  msgDiffReduction->setProperty("IndividualGrouping", individualGrouping);
-  msgDiffReduction->setProperty("SaveFormats", saveFormats);
   msgDiffReduction->setProperty("OutputWorkspace", "IndirectDiffraction_Workspaces");
 
+  // Add the property for grouping policy if needed
+  if(m_uiForm.ckIndividualGrouping->isChecked())
+    msgDiffReduction->setProperty("GroupingPolicy", "Individual");
+
   m_batchAlgoRunner->addAlgorithm(msgDiffReduction);
 
   m_batchAlgoRunner->executeBatchAsync();
 }
 
+
 /**
  * Runs a diffraction reduction for OSIRIS operating in diffonly mode using the OSIRISDiffractionReduction algorithm.
  */
@@ -308,6 +374,7 @@ void IndirectDiffractionReduction::runOSIRISdiffonlyReduction()
   m_batchAlgoRunner->executeBatchAsync();
 }
 
+
 /**
  * Loads an empty instrument and returns a pointer to the workspace.
 *
@@ -344,6 +411,7 @@ MatrixWorkspace_sptr IndirectDiffractionReduction::loadInstrument(std::string in
   return instWorkspace;
 }
 
+
 /**
  * Handles setting default spectra range when an instrument configuration is selected.
 *
@@ -406,6 +474,7 @@ void IndirectDiffractionReduction::instrumentSelected(const QString & instrument
   }
 }
 
+
 /**
  * Handles opening the directory manager window.
  */
@@ -416,6 +485,7 @@ void IndirectDiffractionReduction::openDirectoryDialog()
   ad->setFocus();
 }
 
+
 /**
  * Handles the user clicking the help button.
 */
@@ -424,10 +494,12 @@ void IndirectDiffractionReduction::help()
   MantidQt::API::HelpWindow::showCustomInterface(NULL, QString("Indirect_Diffraction"));
 }
 
+
 void IndirectDiffractionReduction::initLocalPython()
 {
 }
 
+
 void IndirectDiffractionReduction::loadSettings()
 {
   QSettings settings;
@@ -442,6 +514,7 @@ void IndirectDiffractionReduction::loadSettings()
   settings.endGroup();
 }
 
+
 void IndirectDiffractionReduction::saveSettings()
 {
   QSettings settings;
@@ -452,6 +525,7 @@ void IndirectDiffractionReduction::saveSettings()
   settings.endGroup();
 }
 
+
 /**
  * Validates the rebinning fields and updates invalid markers.
 *
@@ -501,6 +575,7 @@ bool IndirectDiffractionReduction::validateRebin()
   return rebinValid;
 }
 
+
 /**
  * Checks to see if the vanadium and cal file fields are valid.
* @@ -517,6 +592,7 @@ bool IndirectDiffractionReduction::validateVanCal() return true; } + /** * Disables and shows message on run button indicating that run files have benn changed. */ @@ -526,6 +602,7 @@ void IndirectDiffractionReduction::runFilesChanged() m_uiForm.pbRun->setText("Editing..."); } + /** * Disables and shows message on run button to indicate searching for data files. */ @@ -535,6 +612,7 @@ void IndirectDiffractionReduction::runFilesFinding() m_uiForm.pbRun->setText("Finding files..."); } + /** * Updates run button with result of file search. */ @@ -554,6 +632,7 @@ void IndirectDiffractionReduction::runFilesFound() m_uiForm.dem_ckSumFiles->setChecked(false); } + /** * Handles the user toggling the individual grouping check box. * diff --git a/Code/Mantid/Testing/SystemTests/tests/analysis/IndirectDiffractionTests.py b/Code/Mantid/Testing/SystemTests/tests/analysis/IndirectDiffractionTests.py index f5ef84cc4597..aa515d519e1b 100644 --- a/Code/Mantid/Testing/SystemTests/tests/analysis/IndirectDiffractionTests.py +++ b/Code/Mantid/Testing/SystemTests/tests/analysis/IndirectDiffractionTests.py @@ -5,7 +5,7 @@ class MSGDiffractionReductionTest(stresstesting.MantidStressTest): """ - Base class for tests that use the MSGDiffractionReduction algorithm. + Base class for tests that use the ISISIndirectDiffractionReduction algorithm. """ __metaclass__ = ABCMeta @@ -20,17 +20,17 @@ def get_reference_file(self): def runTest(self): """ - Runs an MSGDiffractionReduction with the configured parameters. + Runs an ISISIndirectDiffractionReduction with the configured parameters. """ - from mantid.simpleapi import MSGDiffractionReduction + from mantid.simpleapi import ISISIndirectDiffractionReduction from mantid import mtd - MSGDiffractionReduction(InputFiles=self.raw_file, - OutputWorkspace=self.output_workspace_group, - Instrument=self.instrument, - Mode=self.mode, - DetectorRange=self.detector_range, - RebinParam=self.rebinning) + ISISIndirectDiffractionReduction(InputFiles=self.raw_file, + OutputWorkspace=self.output_workspace_group, + Instrument=self.instrument, + Mode=self.mode, + SpectraRange=self.spectra_range, + RebinParam=self.rebinning) self._output_workspace = mtd[self.output_workspace_group].getNames()[0] @@ -51,7 +51,7 @@ def __init__(self): self.instrument = 'IRIS' self.mode = 'diffspec' self.raw_file = 'IRS21360.raw' - self.detector_range = [105, 112] + self.spectra_range = [105, 112] self.rebinning = '3.0,0.001,4.0' self.output_workspace_group = 'IRIS_Diffraction_DiffSpec_Test' @@ -68,7 +68,7 @@ def __init__(self): self.instrument = 'TOSCA' self.mode = 'diffspec' self.raw_file = 'TSC11453.raw' - self.detector_range = [146, 149] + self.spectra_range = [146, 149] self.rebinning = '0.5,0.001,2.1' self.output_workspace_group = 'TOSCA_Diffraction_DiffSpec_Test' @@ -85,7 +85,7 @@ def __init__(self): self.instrument = 'OSIRIS' self.mode = 'diffspec' self.raw_file = 'osiris00101300.raw' - self.detector_range = [3, 962] + self.spectra_range = [3, 962] self.rebinning = '2.0,0.001,3.0' self.output_workspace_group = 'OSIRIS_Diffraction_DiffSpec_Test' diff --git a/Code/Mantid/docs/source/algorithms/ISISIndirectDiffractionReduction-v1.rst b/Code/Mantid/docs/source/algorithms/ISISIndirectDiffractionReduction-v1.rst new file mode 100644 index 000000000000..120fa4a9075c --- /dev/null +++ b/Code/Mantid/docs/source/algorithms/ISISIndirectDiffractionReduction-v1.rst @@ -0,0 +1,47 @@ +.. algorithm:: + +.. summary:: + +.. alias:: + +.. 
properties:: + +Description +----------- + +The generic routine used to reduce diffraction runs from indirect geometry +inelastic instruments at ISIS. + +Workflow +-------- + +.. diagram:: ISISIndirectDiffractionReduction-v1_wkflw.dot + +Usage +----- + +**Example - Running ISISIndirectDiffractionReduction.** + +.. testcode:: ExISISIndirectDiffractionReductionSimple + + ISISIndirectDiffractionReduction(InputFiles='IRS21360.raw', + OutputWorkspace='DiffractionReductions', + Instrument='IRIS', + Mode='diffspec', + SpectraRange=[105,112]) + + ws = mtd['DiffractionReductions'].getItem(0) + + print 'Workspace name: %s' % ws.getName() + print 'Number of spectra: %d' % ws.getNumberHistograms() + print 'Number of bins: %s' % ws.blocksize() + +Output: + +.. testoutput:: ExISISIndirectDiffractionReductionSimple + + Workspace name: IRS21360_diffspec_red + Number of spectra: 1 + Number of bins: 1935 + +.. categories:: diff --git a/Code/Mantid/docs/source/algorithms/ISISIndirectEnergyTransfer-v1.rst b/Code/Mantid/docs/source/algorithms/ISISIndirectEnergyTransfer-v1.rst index 5f295ba18848..c0d2f42e1b5f 100644 --- a/Code/Mantid/docs/source/algorithms/ISISIndirectEnergyTransfer-v1.rst +++ b/Code/Mantid/docs/source/algorithms/ISISIndirectEnergyTransfer-v1.rst @@ -12,6 +12,11 @@ Description Performs a reduction from raw time of flight to energy transfer for an inelastic indirect geometry instrument at ISIS. +Workflow +-------- + +.. diagram:: ISISIndirectEnergyTransfer-v1_wkflw.dot + Usage ----- diff --git a/Code/Mantid/docs/source/algorithms/MSGDiffractionReduction-v1.rst b/Code/Mantid/docs/source/algorithms/MSGDiffractionReduction-v1.rst deleted file mode 100644 index de9bb0b693f6..000000000000 --- a/Code/Mantid/docs/source/algorithms/MSGDiffractionReduction-v1.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. algorithm:: - -.. summary:: - -.. alias:: - -.. properties:: - -Description ------------ - -The generic routine used to reduce diffraction runs from indirect inelastic geometry instruments at ISIS. - -Usage ------ - -**Example - Running MSGDiffractionReduction.** - -.. testcode:: ExMSGDiffractionReductionSimple - - MSGDiffractionReduction(InputFiles='IRS21360.raw', - OutputWorkspace='DiffractionReductions', - Instrument='IRIS', - Mode='diffspec', - DetectorRange=[105,112]) - - ws = mtd['DiffractionReductions'].getItem(0) - - print 'Workspace name: %s' % ws.getName() - print 'Number of spectra: %d' % ws.getNumberHistograms() - print 'Number of bins: %s' % ws.blocksize() - -Output: - -.. testoutput:: ExMSGDiffractionReductionSimple - - Workspace name: irs21360_diffspec_red - Number of spectra: 1 - Number of bins: 1935 - -.. 
categories:: diff --git a/Code/Mantid/docs/source/diagrams/ISISIndirectDiffractionReduction-v1_wkflw.dot b/Code/Mantid/docs/source/diagrams/ISISIndirectDiffractionReduction-v1_wkflw.dot new file mode 100644 index 000000000000..d96dc1155cc5 --- /dev/null +++ b/Code/Mantid/docs/source/diagrams/ISISIndirectDiffractionReduction-v1_wkflw.dot @@ -0,0 +1,92 @@ +digraph ISISIndirectDiffractionReduction { + label="ISISIndirectDiffractionReduction Flowchart" + $global_style + + subgraph decisions { + $decision_style + need_to_unwrap [label="Need To Unwrap Monitor?"] + is_multi_frame [label="Is Multiple Frames?"] + } + + subgraph params { + $param_style + InputFiles + SumFiles + Instrument + Mode + SpectraRange + RebinParam + GroupingPolicy + OutputWorkspace + } + + subgraph algorithms { + $algorithm_style + ConvertUnits_unwrap_monitor [label="ConvertUnits"] + ConvertUnits_dSpacing [label="ConvertUnits"] + GroupWorkspaces + } + + subgraph processes { + $process_style + load_files [label="load_files\nLoads run files and instrument parameters.\nChops data into multiple frames if required.\nSumms runs if required."] + get_multi_frame_rebin [label="get_multi_frame_rebin\nGets a rebin string for rebinning\nmultiple frames."] + identify_bad_detectors [label="identify_bad_detectors\nGets a list of noisy detectors using\nthe IdentifyNoisyDetectors algorithm."] + unwrap_monitor [label="unwrap_monitor\nUnwraps monitor based on IPF options\nusing UnwrapMonitor algorithm."] + process_monitor_efficiency [label="process_monitor_efficiency\nCorrects for monitor efficiency by\narea, thickness and attenuation."] + scale_monitor [label="scale_monitor\nApplies monitor scaling\nas per the IPF."] + scale_detectors [label="scale_detectors\nScales detectors by the\nmonitor intensity."] + rebin_reduction [label="rebin_reduction\nRebins the reduction based\non the RebinParam option."] + group_spectra [label="group_spectra\nGroups spectra based\non the GroupingPolicy."] + fold_chopped [label="fold_chopped\nFolds multiple frames into\na single reduction."] + rename_reduction [label="rename_reduction\nRenames reduced workspaces\nas per the IPF options."] + } + + subgraph values { + $value_style + multi_frame_rebin_string [label="Multiple Frame Rebin String"] + multi_frame_num_bins [label="Maximum Number of Bins"] + masked_detectors [label="Masked Detectors"] + e_mode_unwrap_monitor [label="Elastic"] + unit_unwrap_monitor [label="Wavelength"] + e_mode [label="Elastic"] + dspacing_unit [label="dSpacing"] + } + + InputFiles -> load_files + SpectraRange -> load_files + Instrument -> load_files + Mode -> load_files + SumFiles -> load_files + load_files -> get_multi_frame_rebin + RebinParam -> get_multi_frame_rebin + get_multi_frame_rebin -> multi_frame_rebin_string + get_multi_frame_rebin -> multi_frame_num_bins + load_files -> identify_bad_detectors + identify_bad_detectors -> masked_detectors + load_files -> need_to_unwrap + need_to_unwrap -> unwrap_monitor [label="Yes"] + unwrap_monitor -> ConvertUnits_unwrap_monitor + unit_unwrap_monitor -> ConvertUnits_unwrap_monitor [label="Target"] + e_mode_unwrap_monitor -> ConvertUnits_unwrap_monitor [label="EMode"] + ConvertUnits_unwrap_monitor -> process_monitor_efficiency + need_to_unwrap -> process_monitor_efficiency [label="No"] + process_monitor_efficiency -> scale_monitor + scale_monitor -> scale_detectors + scale_detectors -> ConvertUnits_dSpacing + dspacing_unit -> ConvertUnits_dSpacing [label="Target"] + e_mode -> ConvertUnits_dSpacing [label="EMode"] + ConvertUnits_dSpacing -> 
rebin_reduction + RebinParam -> rebin_reduction + multi_frame_rebin_string -> rebin_reduction + multi_frame_num_bins -> rebin_reduction + rebin_reduction -> group_spectra + GroupingPolicy -> group_spectra + masked_detectors -> group_spectra + group_spectra -> is_multi_frame + is_multi_frame -> fold_chopped [label="Yes"] + fold_chopped -> rename_reduction + is_multi_frame -> rename_reduction [label="No"] + rename_reduction -> GroupWorkspaces + GroupWorkspaces -> OutputWorkspace +} diff --git a/Code/Mantid/docs/source/diagrams/ISISIndirectEnergyTransfer-v1_wkflw.dot b/Code/Mantid/docs/source/diagrams/ISISIndirectEnergyTransfer-v1_wkflw.dot new file mode 100644 index 000000000000..4d5383966c42 --- /dev/null +++ b/Code/Mantid/docs/source/diagrams/ISISIndirectEnergyTransfer-v1_wkflw.dot @@ -0,0 +1,153 @@ +digraph ISISIndirectEnergyTransfer { + label="ISISIndirectEnergyTransfer Flowchart" + $global_style + + subgraph decisions { + $decision_style + is_multi_frame [label="Is Data Multiple Frames?"] + is_x_unit_mev [label="Is UnitX in meV?"] + should_fold [label="FoldMultipleFrames?"] + have_scale_factor [label="ScaleFactor is not 1?"] + have_background_range [label="Have BackgroundRange?"] + have_calibration_workspace [label="Have CalibrationWorksapce"] + have_detailed_balance [label="Have DetailedBalance?"] + have_save_formats [label="Have SaveFormats?"] + need_to_unwrap [label="Need to Unwrap Monitor?"] + } + + subgraph params { + $param_style + InputFiles + SumFiles + CalibrationWorksapce + Instrument + Analyser + Reflection + SpectraRange + BackgroundRange + RebinString + DetailedBalance + ScaleFactor + GroupingMethod + GroupingWorkspace + MapFile + UnitX + SaveFormats + OutputWorkspace + } + + subgraph algorithms { + $algorithm_style + ConvertUnits_unwrap_monitor [label="ConvertUnits"] + ConvertUnits_DeltaE [label="ConvertUnits"] + ConvertUnits_Output [label="ConvertUnits"] + ConvertToDistribution + ConvertFromDistribution + ExponentialCorrection + GroupWorkspaces + CalculateFlatBackground + Divide + Scale + CorrectKiKf + } + + subgraph processes { + $process_style + load_files [label="load_files\nLoads run files and instrument parameters.\nChops data into multiple frames if required.\nSumms runs if required."] + get_multi_frame_rebin [label="get_multi_frame_rebin\nGets a rebin string for rebinning\nmultiple frames."] + identify_bad_detectors [label="identify_bad_detectors\nGets a list of noisy detectors using\nthe IdentifyNoisyDetectors algorithm."] + unwrap_monitor [label="unwrap_monitor\nUnwraps monitor based on IPF options\nusing UnwrapMonitor algorithm."] + process_monitor_efficiency [label="process_monitor_efficiency\nCorrects for monitor efficiency by\narea, thickness and attenuation."] + scale_monitor [label="scale_monitor\nApplies monitor scaling\nas per the IPF."] + scale_detectors [label="scale_detectors\nScales detectors by the\nmonitor intensity."] + rebin_reduction [label="rebin_reduction\nRebins the reduction based\non the RebinParam option."] + group_spectra [label="group_spectra\nGroups spectra based\non the GroupingPolicy."] + fold_chopped [label="fold_chopped\nFolds multiple frames into\na single reduction."] + rename_reduction [label="rename_reduction\nRenames reduced workspaces\nas per the IPF options."] + save_reduction [label="save_reduction\nSaves reduced files in formats\ngiven by SaveFormats."] + } + + subgraph values { + $value_style + multi_frame_rebin_string [label="Multiple Frame Rebin String"] + multi_frame_num_bins [label="Maximum Number of Bins"] + 
masked_detectors [label="Masked Detectors"] + detailed_balance_corr_factor [label="11.606 / (2 * DetailedBalance)"] + e_mode_unwrap_monitor [label="Elastic"] + unit_unwrap_monitor [label="Wavelength"] + e_mode [label="Indirect"] + energy_unit [label="DeltaE"] + e_mode_output_unit [label="Indirect"] + } + + InputFiles -> load_files + Instrument -> load_files + Analyser -> load_files + Reflection -> load_files + SumFiles -> load_files + SpectraRange -> load_files + load_files -> get_multi_frame_rebin + RebinString -> get_multi_frame_rebin + get_multi_frame_rebin -> multi_frame_rebin_string + get_multi_frame_rebin -> multi_frame_num_bins + load_files -> identify_bad_detectors + identify_bad_detectors -> masked_detectors + load_files -> need_to_unwrap + need_to_unwrap -> unwrap_monitor [label="Yes"] + unwrap_monitor -> ConvertUnits_unwrap_monitor + unit_unwrap_monitor -> ConvertUnits_unwrap_monitor [label="Target"] + e_mode_unwrap_monitor -> ConvertUnits_unwrap_monitor [label="EMode"] + ConvertUnits_unwrap_monitor -> process_monitor_efficiency + need_to_unwrap -> process_monitor_efficiency [label="No"] + process_monitor_efficiency -> scale_monitor + scale_monitor -> have_background_range + have_background_range -> ConvertToDistribution [label="Yes"] + ConvertToDistribution -> CalculateFlatBackground + BackgroundRange -> CalculateFlatBackground + CalculateFlatBackground -> ConvertFromDistribution + ConvertFromDistribution -> have_calibration_workspace + have_background_range -> have_calibration_workspace [label="No"] + have_calibration_workspace -> Divide [label="Yes"] + CalibrationWorkspace -> Divide + Divide -> scale_detectors + have_calibration_workspace -> scale_detectors [label="No"] + scale_detectors -> ConvertUnits_DeltaE + energy_unit -> ConvertUnits_DeltaE [label="Target"] + e_mode -> ConvertUnits_DeltaE [label="EMode"] + ConvertUnits_DeltaE -> CorrectKiKf + e_mode -> CorrectKiKf [label="EMode"] + CorrectKiKf -> rebin_reduction + RebinString -> rebin_reduction + multi_frame_rebin_string -> rebin_reduction + multi_frame_num_bins -> rebin_reduction + rebin_reduction -> have_detailed_balance + have_detailed_balance -> ExponentialCorrection [label="Yes"] + DetailedBalance -> detailed_balance_corr_factor + detailed_balance_corr_factor -> ExponentialCorrection [label="C1"] + ExponentialCorrection -> have_scale_factor + have_detailed_balance -> have_scale_factor [label="No"] + have_scale_factor -> Scale [label="Yes"] + ScaleFactor -> Scale + Scale -> group_spectra + have_scale_factor -> group_spectra [label="No"] + masked_detectors -> group_spectra + GroupingMethod -> group_spectra + MapFile -> group_spectra + GroupingWorkspace -> group_spectra + group_spectra -> is_multi_frame + is_multi_frame -> should_fold [label="Yes"] + should_fold -> fold_chopped [label="Yes"] + should_fold -> is_x_unit_mev [label="No"] + fold_chopped -> is_x_unit_mev + is_multi_frame -> is_x_unit_mev [label="No"] + is_x_unit_mev -> ConvertUnits_Output [label="No"] + UnitX -> ConvertUnits_Output [label="Target"] + e_mode_output_unit -> ConvertUnits_Output [label="EMode"] + ConvertUnits_Output -> rename_reduction + is_x_unit_mev -> rename_reduction [label="Yes"] + rename_reduction -> GroupWorkspaces + rename_reduction -> have_save_formats + have_save_formats -> save_reduction [label="Yes"] + SaveFormats -> save_reduction + GroupWorkspaces -> OutputWorkspace +}
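
For orientation, a minimal sketch of how the diffraction workflow charted above might be driven from a Mantid Python session; the run file, spectra range and output name are hypothetical values, not part of this patch:

from mantid.simpleapi import ISISIndirectDiffractionReduction

# Hypothetical IRIS run; the SpectraRange values are placeholders
ISISIndirectDiffractionReduction(InputFiles='IRS21360.raw',
                                 Instrument='IRIS',
                                 Mode='diffspec',
                                 SpectraRange=[105, 112],
                                 GroupingPolicy='All',
                                 OutputWorkspace='diffraction_result')

diff --git a/Code/Mantid/scripts/Inelastic/IndirectDiffractionReduction.py b/Code/Mantid/scripts/Inelastic/IndirectDiffractionReduction.py deleted file mode 100644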
index af6f9f98bd8b..000000000000 --- a/Code/Mantid/scripts/Inelastic/IndirectDiffractionReduction.py +++ /dev/null @@ -1,74 +0,0 @@ -#pylint: disable=invalid-name -import mantid -from msg_reducer import MSGReducer -import inelastic_indirect_reduction_steps as steps - - -class MSGDiffractionReducer(MSGReducer): - """Reducer for Diffraction on IRIS and TOSCA. - """ - - def __init__(self): - super(MSGDiffractionReducer, self).__init__() - self._grouping_policy = 'All' - - def _setup_steps(self): - self.append_step(steps.IdentifyBadDetectors( - MultipleFrames=self._multiple_frames)) - self.append_step(steps.HandleMonitor( - MultipleFrames=self._multiple_frames)) - self.append_step(steps.CorrectByMonitor( - MultipleFrames=self._multiple_frames, EMode="Elastic")) - - if self._multiple_frames: - if self._fold_multiple_frames: - self.append_step(steps.FoldData()) - else: - return - - step = mantid.AlgorithmManager.create("ConvertUnits") - step.setPropertyValue("Target", "dSpacing") - step.setPropertyValue("EMode", "Elastic") - self.append_step(step) - - if self._rebin_string is not None: - step = mantid.AlgorithmManager.create("Rebin") - step.setPropertyValue("Params", self._rebin_string) - self.append_step(step) - else: - self.append_step(steps.RebinToFirstSpectrum()) - - step = steps.Grouping() - step.set_grouping_policy(self._grouping_policy) - self.append_step(step) - - # The "SaveItem" step saves the files in the requested formats. - if len(self._save_formats) > 0: - step = steps.SaveItem() - step.set_formats(self._save_formats) - self.append_step(step) - - step = steps.Naming() - self.append_step(step) - - def set_grouping_policy(self, policy): - """ - Sets the grouping policy for the result data. - - @parm policy New grouping policy - """ - if not isinstance(policy, str): - raise ValueError('Grouping policy must be a string') - - self._grouping_policy = policy - - -def getStringProperty(workspace, property): - """This function is used in the interface. - """ - inst = mantid.AnalysisDataService[workspace].getInstrument() - try: - prop = inst.getStringParameter(property)[0] - except IndexError: - return "" - return prop diff --git a/Code/Mantid/scripts/Inelastic/IndirectReductionCommon.py b/Code/Mantid/scripts/Inelastic/IndirectReductionCommon.py new file mode 100644 index 000000000000..ddd8cc26d931 --- /dev/null +++ b/Code/Mantid/scripts/Inelastic/IndirectReductionCommon.py @@ -0,0 +1,756 @@ +#pylint: disable=invalid-name,too-many-branches,too-many-arguments,deprecated-module,no-name-in-module +from mantid.api import WorkspaceGroup, AlgorithmManager +from mantid import mtd, logger, config + +import os +import numpy as np + +#------------------------------------------------------------------------------- + +def load_files(data_files, ipf_filename, spec_min, spec_max, sum_files, load_opts=None): + """ + Loads a set of files and extracts just the spectra we care about (i.e. detector range and monitor). 
+ + @param data_files List of data file names + @param ipf_filename File path/name for the instrument parameter file to load + @param spec_min Minimum spectrum ID to load + @param spec_max Maximum spectrum ID to load + @param sum_files Sum loaded files + @param load_opts Additional options to be passed to load algorithm + + @return List of loaded workspace names and flag indicating chopped data + """ + from mantid.simpleapi import (Load, LoadVesuvio, LoadParameterFile, + ChopData, ExtractSingleSpectrum, + CropWorkspace) + + if load_opts is None: + load_opts = {} + + workspace_names = [] + + for filename in data_files: + # The filename without path and extension will be the workspace name + ws_name = os.path.splitext(os.path.basename(filename))[0] + logger.debug('Loading file %s as workspace %s' % (filename, ws_name)) + + if 'VESUVIO' in ipf_filename: + evs_filename = os.path.basename(filename).replace('EVS', '') + LoadVesuvio(Filename=evs_filename, + OutputWorkspace=ws_name, + SpectrumList='1-198', + **load_opts) + else: + Load(Filename=filename, + OutputWorkspace=ws_name, + **load_opts) + + # Load the instrument parameters + LoadParameterFile(Workspace=ws_name, + Filename=ipf_filename) + + # Add the workspace to the list of workspaces + workspace_names.append(ws_name) + + # Get the spectrum number for the monitor + instrument = mtd[ws_name].getInstrument() + monitor_index = int(instrument.getNumberParameter('Workflow.Monitor1-SpectrumNumber')[0]) + logger.debug('Workspace %s monitor 1 spectrum number: %d' % (ws_name, monitor_index)) + + # Chop data if required + try: + chop_threshold = mtd[ws_name].getInstrument().getNumberParameter('Workflow.ChopDataIfGreaterThan')[0] + x_max = mtd[ws_name].readX(0)[-1] + chopped_data = x_max > chop_threshold + except IndexError: + chopped_data = False + logger.information('Workspace %s needs chopping: %s' % (ws_name, str(chopped_data))) + + workspaces = [ws_name] + if chopped_data: + ChopData(InputWorkspace=ws_name, + OutputWorkspace=ws_name, + MonitorWorkspaceIndex=monitor_index, + IntegrationRangeLower=5000.0, + IntegrationRangeUpper=10000.0, + NChops=5) + workspaces = mtd[ws_name].getNames() + + for chop_ws_name in workspaces: + # Get the monitor spectrum + monitor_ws_name = chop_ws_name + '_mon' + ExtractSingleSpectrum(InputWorkspace=chop_ws_name, + OutputWorkspace=monitor_ws_name, + WorkspaceIndex=monitor_index) + + # Crop to the detectors required + CropWorkspace(InputWorkspace=chop_ws_name, OutputWorkspace=chop_ws_name, + StartWorkspaceIndex=int(spec_min) - 1, + EndWorkspaceIndex=int(spec_max) - 1) + + logger.information('Loaded workspace names: %s' % (str(workspace_names))) + logger.information('Chopped data: %s' % (str(chopped_data))) + + # Sum files if needed + if sum_files: + if chopped_data: + workspace_names = sum_chopped_runs(workspace_names) + else: + workspace_names = sum_regular_runs(workspace_names) + + logger.information('Summed workspace names: %s' % (str(workspace_names))) + + return workspace_names, chopped_data + +#-------------------------------------------------------------------------------
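
A hypothetical call against the load_files signature above; the run file, parameter file and spectra range are placeholders for illustration:

from IndirectReductionCommon import load_files

ws_names, chopped = load_files(data_files=['IRS21360.raw'],
                               ipf_filename='IRIS_diffraction_diffspec_Parameters.xml',
                               spec_min=105,
                               spec_max=112,
                               sum_files=False)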
+ +def sum_regular_runs(workspace_names): + """ + Sum runs with single workspace data. + + @param workspace_names List of names of input workspaces + @return One-element list containing the name of the summed workspace + """ + from mantid.simpleapi import (MergeRuns, Scale, AddSampleLog, + DeleteWorkspace) + + # Use the first workspace name as the result of summation + summed_detector_ws_name = workspace_names[0] + summed_monitor_ws_name = workspace_names[0] + '_mon' + + # Get a list of the run numbers for the original data + run_numbers = ','.join([str(mtd[ws_name].getRunNumber()) for ws_name in workspace_names]) + + # Generate lists of the detector and monitor workspaces + detector_workspaces = ','.join(workspace_names) + monitor_workspaces = ','.join([ws_name + '_mon' for ws_name in workspace_names]) + + # Merge the raw workspaces + MergeRuns(InputWorkspaces=detector_workspaces, + OutputWorkspace=summed_detector_ws_name) + MergeRuns(InputWorkspaces=monitor_workspaces, + OutputWorkspace=summed_monitor_ws_name) + + # Delete old workspaces + for idx in range(1, len(workspace_names)): + DeleteWorkspace(workspace_names[idx]) + DeleteWorkspace(workspace_names[idx] + '_mon') + + # Derive the scale factor based on number of merged workspaces + scale_factor = 1.0 / len(workspace_names) + logger.information('Scale factor for summed workspaces: %f' % scale_factor) + + # Scale the new detector and monitor workspaces + Scale(InputWorkspace=summed_detector_ws_name, + OutputWorkspace=summed_detector_ws_name, + Factor=scale_factor) + Scale(InputWorkspace=summed_monitor_ws_name, + OutputWorkspace=summed_monitor_ws_name, + Factor=scale_factor) + + # Add the list of run numbers to the result workspace as a sample log + AddSampleLog(Workspace=summed_detector_ws_name, LogName='multi_run_numbers', + LogType='String', LogText=run_numbers) + + # Only have the one workspace now + return [summed_detector_ws_name] + +#-------------------------------------------------------------------------------
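
Because sum_regular_runs records the constituent runs in the 'multi_run_numbers' sample log, they can be recovered from the summed result later; a minimal sketch, assuming a hypothetical summed workspace name:

from mantid import mtd

# 'irs21360' is an invented workspace name for illustration
run_numbers = mtd['irs21360'].getRun()['multi_run_numbers'].value.split(',')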
+ """ + from mantid.simpleapi import (MergeRuns, Scale, DeleteWorkspace) + + try: + num_merges = len(mtd[workspace_names[0]].getNames()) + except: + raise RuntimeError('Not all runs have been chopped, cannot sum.') + + merges = list() + + # Generate a list of workspaces to be merged + for idx in range(0, num_merges): + merges.append({'detector':list(), 'monitor':list()}) + + for ws_name in workspace_names: + detector_ws_name = mtd[ws_name].getNames()[idx] + monitor_ws_name = detector_ws_name + '_mon' + + merges[idx]['detector'].append(detector_ws_name) + merges[idx]['monitor'].append(monitor_ws_name) + + for merge in merges: + # Merge the chopped run segments + MergeRuns(InputWorkspaces=','.join(merge['detector']), + OutputWorkspace=merge['detector'][0]) + MergeRuns(InputWorkspaces=','.join(merge['monitor']), + OutputWorkspace=merge['monitor'][0]) + + # Scale the merged runs + merge_size = len(merge['detector']) + factor = 1.0 / merge_size + Scale(InputWorkspace=merge['detector'][0], + OutputWorkspace=merge['detector'][0], + Factor=factor, + Operation='Multiply') + Scale(InputWorkspace=merge['monitor'][0], + OutputWorkspace=merge['monitor'][0], + Factor=factor, + Operation='Multiply') + + # Remove the old workspaces + for idx in range(1, merge_size): + DeleteWorkspace(merge['detector'][idx]) + DeleteWorkspace(merge['monitor'][idx]) + + # Only have the one workspace now + return [workspace_names[0]] + +#------------------------------------------------------------------------------- + +def identify_bad_detectors(workspace_name): + """ + Identify detectors which should be masked + + @param workspace_name Name of worksapce to use ot get masking detectors + @return List of masked spectra + """ + from mantid.simpleapi import (IdentifyNoisyDetectors, DeleteWorkspace) + + instrument = mtd[workspace_name].getInstrument() + + try: + masking_type = instrument.getStringParameter('Workflow.Masking')[0] + except IndexError: + masking_type = 'None' + + logger.information('Masking type: %s' % (masking_type)) + + masked_spec = list() + + if masking_type == 'IdentifyNoisyDetectors': + ws_mask = '__workspace_mask' + IdentifyNoisyDetectors(InputWorkspace=workspace_name, + OutputWorkspace=ws_mask) + + # Convert workspace to a list of spectra + num_spec = mtd[ws_mask].getNumberHistograms() + masked_spec = [spec for spec in range(0, num_spec) if mtd[ws_mask].readY(spec)[0] == 0.0] + + # Remove the temporary masking workspace + DeleteWorkspace(ws_mask) + + logger.debug('Masked specta for workspace %s: %s' % (workspace_name, str(masked_spec))) + + return masked_spec + +#------------------------------------------------------------------------------- + +def unwrap_monitor(workspace_name): + """ + Unwrap monitor if required based on value of Workflow.UnwrapMonitor parameter + + @param workspace_name Name of workspace + @return True if the monitor was unwrapped + """ + from mantid.simpleapi import (UnwrapMonitor, RemoveBins, FFTSmooth) + + monitor_workspace_name = workspace_name + '_mon' + instrument = mtd[monitor_workspace_name].getInstrument() + + # Determine if the monitor should be unwrapped + try: + unwrap = instrument.getStringParameter('Workflow.UnwrapMonitor')[0] + + if unwrap == 'Always': + should_unwrap = True + elif unwrap == 'BaseOnTimeRegime': + mon_time = mtd[monitor_workspace_name].readX(0)[0] + det_time = mtd[workspace_name].readX(0)[0] + logger.notice(str(mon_time) + " " + str(det_time)) + should_unwrap = mon_time == det_time + else: + should_unwrap = False + + except IndexError: + should_unwrap = 
+ + logger.debug('Need to unwrap monitor for %s: %s' % (workspace_name, str(should_unwrap))) + + if should_unwrap: + sample = instrument.getSample() + sample_to_source = sample.getPos() - instrument.getSource().getPos() + radius = mtd[workspace_name].getDetector(0).getDistance(sample) + z_dist = sample_to_source.getZ() + l_ref = z_dist + radius + + logger.debug('For workspace %s: radius=%f, z_dist=%f, l_ref=%f' % + (workspace_name, radius, z_dist, l_ref)) + + _, join = UnwrapMonitor(InputWorkspace=monitor_workspace_name, + OutputWorkspace=monitor_workspace_name, + LRef=l_ref) + + RemoveBins(InputWorkspace=monitor_workspace_name, + OutputWorkspace=monitor_workspace_name, + XMin=join - 0.001, XMax=join + 0.001, + Interpolation='Linear') + + try: + FFTSmooth(InputWorkspace=monitor_workspace_name, + OutputWorkspace=monitor_workspace_name, + WorkspaceIndex=0) + except ValueError: + raise ValueError('Uneven bin widths are not supported.') + + return should_unwrap + +#------------------------------------------------------------------------------- + +def process_monitor_efficiency(workspace_name): + """ + Process monitor efficiency for a given workspace. + + @param workspace_name Name of workspace to process monitor for + """ + from mantid.simpleapi import OneMinusExponentialCor + + monitor_workspace_name = workspace_name + '_mon' + instrument = mtd[workspace_name].getInstrument() + + try: + area = instrument.getNumberParameter('Workflow.Monitor1-Area')[0] + thickness = instrument.getNumberParameter('Workflow.Monitor1-Thickness')[0] + attenuation = instrument.getNumberParameter('Workflow.Monitor1-Attenuation')[0] + except IndexError: + raise ValueError('Cannot get monitor details from the parameter file') + + if area == -1 or thickness == -1 or attenuation == -1: + logger.information('For workspace %s, skipping monitor efficiency' % (workspace_name)) + return + + OneMinusExponentialCor(InputWorkspace=monitor_workspace_name, + OutputWorkspace=monitor_workspace_name, + C=attenuation * thickness, + C1=area) + +#------------------------------------------------------------------------------- + +def scale_monitor(workspace_name): + """ + Scale monitor intensity by a factor given as the Workflow.Monitor1-ScalingFactor parameter. + + @param workspace_name Name of workspace to process monitor for + """ + from mantid.simpleapi import Scale + + monitor_workspace_name = workspace_name + '_mon' + instrument = mtd[workspace_name].getInstrument() + + try: + scale_factor = instrument.getNumberParameter('Workflow.Monitor1-ScalingFactor')[0] + except IndexError: + logger.information('No monitor scaling factor found for workspace %s' % workspace_name) + return + + if scale_factor != 1.0: + Scale(InputWorkspace=monitor_workspace_name, + OutputWorkspace=monitor_workspace_name, + Factor=1.0 / scale_factor, + Operation='Multiply') + +#-------------------------------------------------------------------------------
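
As a reading aid for process_monitor_efficiency above: OneMinusExponentialCor is driven with C = attenuation * thickness and C1 = area, i.e. a correction term of the form

    C1 * (1 - exp(-C * x))

How that term is applied to the monitor counts is decided by the algorithm itself; this note is an interpretation of the parameter wiring, not part of the patch.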
+ +def scale_detectors(workspace_name, e_mode='Indirect'): + """ + Scales detectors by monitor intensity. + + @param workspace_name Name of detector workspace + @param e_mode Energy mode (Indirect for spectroscopy, Elastic for diffraction) + """ + from mantid.simpleapi import (ConvertUnits, RebinToWorkspace, Divide) + + monitor_workspace_name = workspace_name + '_mon' + + ConvertUnits(InputWorkspace=workspace_name, + OutputWorkspace=workspace_name, + Target='Wavelength', + EMode=e_mode) + + RebinToWorkspace(WorkspaceToRebin=workspace_name, + WorkspaceToMatch=monitor_workspace_name, + OutputWorkspace=workspace_name) + + Divide(LHSWorkspace=workspace_name, + RHSWorkspace=monitor_workspace_name, + OutputWorkspace=workspace_name) + +#------------------------------------------------------------------------------- + +def group_spectra(workspace_name, masked_detectors, method, group_file=None, group_ws=None): + """ + Groups spectra in a given workspace according to the Workflow.GroupingMethod and + Workflow.GroupingFile parameters and the GroupingPolicy property. + + @param workspace_name Name of workspace to group spectra of + @param masked_detectors List of workspace indices to mask + @param method Grouping method (IPF, All, Individual, File, Workspace) + @param group_file File for File method + @param group_ws Workspace for Workspace method + """ + from mantid.simpleapi import (MaskDetectors, GroupDetectors) + + instrument = mtd[workspace_name].getInstrument() + + # If grouping as per the IPF is desired + if method == 'IPF': + # Get the grouping method from the parameter file + try: + grouping_method = instrument.getStringParameter('Workflow.GroupingMethod')[0] + except IndexError: + grouping_method = 'Individual' + + else: + # Otherwise use the value of GroupingPolicy + grouping_method = method + + logger.information('Grouping method for workspace %s is %s' % (workspace_name, grouping_method)) + + if grouping_method == 'Individual': + # Nothing to do here + return + + elif grouping_method == 'All': + # Get a list of all spectra minus those which are masked + num_spec = mtd[workspace_name].getNumberHistograms() + spectra_list = [spec for spec in range(0, num_spec) if spec not in masked_detectors] + + # Apply the grouping + GroupDetectors(InputWorkspace=workspace_name, + OutputWorkspace=workspace_name, + Behaviour='Average', + WorkspaceIndexList=spectra_list) + + elif grouping_method == 'File': + # Get the filename for the grouping file + if group_file is not None: + grouping_file = group_file + else: + try: + grouping_file = instrument.getStringParameter('Workflow.GroupingFile')[0] + except IndexError: + raise RuntimeError('Cannot get grouping file from properties or IPF.') + + # If the file is not found assume it is in the grouping files directory + if not os.path.isfile(grouping_file): + grouping_file = os.path.join(config.getString('groupingFiles.directory'), grouping_file) + + # If it is still not found just give up + if not os.path.isfile(grouping_file): + raise RuntimeError('Cannot find grouping file: %s' % (grouping_file)) + + # Mask detectors if required + if len(masked_detectors) > 0: + MaskDetectors(Workspace=workspace_name, + WorkspaceIndexList=masked_detectors) + + # Apply the grouping + GroupDetectors(InputWorkspace=workspace_name, + OutputWorkspace=workspace_name, + Behaviour='Average', + MapFile=grouping_file) + + elif grouping_method == 'Workspace': + # Apply the grouping + GroupDetectors(InputWorkspace=workspace_name, + OutputWorkspace=workspace_name, + Behaviour='Average', + CopyGroupingFromWorkspace=group_ws) + + else: + raise RuntimeError('Invalid grouping method %s for workspace %s' % (grouping_method, workspace_name))
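
A sketch of how the masking and grouping helpers above chain together; the workspace name is hypothetical:

from IndirectReductionCommon import identify_bad_detectors, group_spectra

masked = identify_bad_detectors('irs21360_diffspec')
group_spectra('irs21360_diffspec',
              masked_detectors=masked,
              method='All')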
+ +#------------------------------------------------------------------------------- + +def fold_chopped(workspace_name): + """ + Folds multiple frames of a data set into one workspace. + + @param workspace_name Name of the group to fold + """ + from mantid.simpleapi import (MergeRuns, DeleteWorkspace, CreateWorkspace, + Divide) + + workspaces = mtd[workspace_name].getNames() + merged_ws = workspace_name + '_merged' + MergeRuns(InputWorkspaces=','.join(workspaces), OutputWorkspace=merged_ws) + + scaling_ws = '__scaling_ws' + unit = mtd[workspace_name].getItem(0).getAxis(0).getUnit().unitID() + + ranges = [] + for ws in mtd[workspace_name].getNames(): + x_min = mtd[ws].dataX(0)[0] + x_max = mtd[ws].dataX(0)[-1] + ranges.append((x_min, x_max)) + DeleteWorkspace(Workspace=ws) + + data_x = mtd[merged_ws].readX(0) + data_y = [] + data_e = [] + + # Each Y value counts how many frames contribute to that bin; dividing the + # merged data by this workspace averages the overlap regions + for i in range(0, mtd[merged_ws].blocksize()): + y_val = 0.0 + for rng in ranges: + if data_x[i] >= rng[0] and data_x[i] <= rng[1]: + y_val += 1.0 + + data_y.append(y_val) + data_e.append(0.0) + + CreateWorkspace(OutputWorkspace=scaling_ws, + DataX=data_x, + DataY=data_y, + DataE=data_e, + UnitX=unit) + + Divide(LHSWorkspace=merged_ws, + RHSWorkspace=scaling_ws, + OutputWorkspace=workspace_name) + + DeleteWorkspace(Workspace=merged_ws) + DeleteWorkspace(Workspace=scaling_ws) + +#------------------------------------------------------------------------------- + +def rename_reduction(workspace_name, multiple_files): + """ + Renames a workspace according to the naming policy in the Workflow.NamingConvention parameter. + + @param workspace_name Name of workspace + @param multiple_files Insert the multiple file marker + @return New name of workspace + """ + from mantid.simpleapi import RenameWorkspace + import string + + is_multi_frame = isinstance(mtd[workspace_name], WorkspaceGroup) + + # Get the instrument + if is_multi_frame: + instrument = mtd[workspace_name].getItem(0).getInstrument() + else: + instrument = mtd[workspace_name].getInstrument() + + # Get the naming convention parameter from the parameter file + try: + convention = instrument.getStringParameter('Workflow.NamingConvention')[0] + except IndexError: + # Default to run title if naming convention parameter not set + convention = 'RunTitle' + logger.information('Naming convention for workspace %s is %s' % (workspace_name, convention)) + + # Get run number + if is_multi_frame: + run_number = mtd[workspace_name].getItem(0).getRun()['run_number'].value + else: + run_number = mtd[workspace_name].getRun()['run_number'].value + logger.information('Run number for workspace %s is %s' % (workspace_name, run_number)) + + inst_name = instrument.getName() + short_inst_name = inst_name # Fallback if no facility defines a short name + for facility in config.getFacilities(): + try: + short_inst_name = facility.instrument(inst_name).shortName() + break + except RuntimeError: + pass + logger.information('Short name for instrument %s is %s' % (inst_name, short_inst_name)) + + # Get run title + if is_multi_frame: + run_title = mtd[workspace_name].getItem(0).getRun()['run_title'].value.strip() + else: + run_title = mtd[workspace_name].getRun()['run_title'].value.strip() + logger.information('Run title for workspace %s is %s' % (workspace_name, run_title)) + + if multiple_files: + multi_run_marker = '_multi' + else: + multi_run_marker = '' + + if convention == 'None': + new_name = workspace_name + + elif convention == 'RunTitle': + # e.g. IRIS run 21360 titled 'quartz' becomes 'irs21360-quartz' + valid = "-_.() %s%s" % (string.ascii_letters, string.digits) + formatted_title = ''.join([c for c in run_title if c in valid]) + new_name = '%s%s%s-%s' % (short_inst_name.lower(), run_number, multi_run_marker, formatted_title)
+ + elif convention == 'AnalyserReflection': + # e.g. IRIS run 21360 with graphite 002 becomes 'IRS21360_graphite002_red' + analyser = instrument.getStringParameter('analyser')[0] + reflection = instrument.getStringParameter('reflection')[0] + new_name = '%s%s%s_%s%s_red' % (short_inst_name.upper(), run_number, multi_run_marker, + analyser, reflection) + + else: + raise RuntimeError('No valid naming convention for workspace %s' % workspace_name) + + logger.information('New name for %s workspace: %s' % (workspace_name, new_name)) + + RenameWorkspace(InputWorkspace=workspace_name, + OutputWorkspace=new_name) + + return new_name + +#------------------------------------------------------------------------------- + +def plot_reduction(workspace_name, plot_type): + """ + Plot a given workspace based on the Plot property. + + @param workspace_name Name of workspace to plot + @param plot_type Type of plot to create + """ + + if plot_type == 'Spectra' or plot_type == 'Both': + from mantidplot import plotSpectrum + num_spectra = mtd[workspace_name].getNumberHistograms() + try: + plotSpectrum(workspace_name, range(0, num_spectra)) + except RuntimeError: + logger.notice('Spectrum plotting canceled by user') + + can_plot_contour = mtd[workspace_name].getNumberHistograms() > 1 + if (plot_type == 'Contour' or plot_type == 'Both') and can_plot_contour: + from mantidplot import importMatrixWorkspace + plot_workspace = importMatrixWorkspace(workspace_name) + plot_workspace.plotGraph2D() + +#------------------------------------------------------------------------------- + +def save_reduction(workspace_names, formats, x_units='DeltaE'): + """ + Saves the workspaces to the default save directory. + + @param workspace_names List of workspace names to save + @param formats List of formats to save in + @param x_units Output X units + """ + from mantid.simpleapi import (SaveSPE, SaveNexusProcessed, SaveNXSPE, + SaveAscii, Rebin, DeleteWorkspace, + ConvertSpectrumAxis, SaveDaveGrp) + + for workspace_name in workspace_names: + if 'spe' in formats: + SaveSPE(InputWorkspace=workspace_name, + Filename=workspace_name + '.spe') + + if 'nxs' in formats: + SaveNexusProcessed(InputWorkspace=workspace_name, + Filename=workspace_name + '.nxs') + + if 'nxspe' in formats: + SaveNXSPE(InputWorkspace=workspace_name, + Filename=workspace_name + '.nxspe') + + if 'ascii' in formats: + # Version 1 of SaveAscii produces output that works better with Excel/Origin + # For some reason this has to be done with an algorithm object, using the function + # wrapper with Version did not change the version that was run + saveAsciiAlg = AlgorithmManager.createUnmanaged('SaveAscii', 1) + saveAsciiAlg.initialize() + saveAsciiAlg.setProperty('InputWorkspace', workspace_name) + saveAsciiAlg.setProperty('Filename', workspace_name + '.dat') + saveAsciiAlg.execute() + + if 'aclimax' in formats: + if x_units == 'DeltaE_inWavenumber': + bins = '24, -0.005, 4000' #cm-1 + else: + bins = '3, -0.005, 500' #meV + + Rebin(InputWorkspace=workspace_name, + OutputWorkspace=workspace_name + '_aclimax_save_temp', + Params=bins) + SaveAscii(InputWorkspace=workspace_name + '_aclimax_save_temp', + Filename=workspace_name + '_aclimax.dat', + Separator='Tab') + DeleteWorkspace(Workspace=workspace_name + '_aclimax_save_temp') + + if 'davegrp' in formats: + ConvertSpectrumAxis(InputWorkspace=workspace_name, + OutputWorkspace=workspace_name + '_davegrp_save_temp', + Target='ElasticQ', + EMode='Indirect') + SaveDaveGrp(InputWorkspace=workspace_name + '_davegrp_save_temp', + Filename=workspace_name + '.grp') + DeleteWorkspace(Workspace=workspace_name + '_davegrp_save_temp') + +#-------------------------------------------------------------------------------
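
A hypothetical call matching the save_reduction signature above, writing NeXus and ASCII files to the default save directory (the workspace name is invented for illustration):

from IndirectReductionCommon import save_reduction

save_reduction(['irs21360_diffspec_red'],
               formats=['nxs', 'ascii'])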
+ +def get_multi_frame_rebin(workspace_name, rebin_string): + """ + Creates a rebin string for rebinning multiple frame data. + + @param workspace_name Name of multiple frame workspace group + @param rebin_string Original rebin string + + @return New rebin string + @return Maximum number of bins in input workspaces + """ + + multi_frame = isinstance(mtd[workspace_name], WorkspaceGroup) + + if rebin_string is not None and multi_frame: + # For later frames, drop the first boundary/step pair, + # e.g. '0,0.1,10,0.05,20' becomes '10,0.05,20' + rebin_string_comp = rebin_string.split(',') + if len(rebin_string_comp) >= 5: + rebin_string_2 = ','.join(rebin_string_comp[2:]) + else: + rebin_string_2 = rebin_string + + bin_counts = [mtd[ws].blocksize() for ws in mtd[workspace_name].getNames()] + num_bins = np.amax(bin_counts) + + return rebin_string_2, num_bins + + return None, None + +#------------------------------------------------------------------------------- + +def rebin_reduction(workspace_name, rebin_string, multi_frame_rebin_string, num_bins): + """ + Rebins a reduction workspace, handling multiple frame data if required. + + @param workspace_name Name of workspace to rebin + @param rebin_string Rebin parameters + @param multi_frame_rebin_string Rebin string for multiple frame rebinning + @param num_bins Max number of bins in input frames + """ + from mantid.simpleapi import (Rebin, RebinToWorkspace) + + if rebin_string is not None: + if multi_frame_rebin_string is not None and num_bins is not None: + # Multi frame data + if mtd[workspace_name].blocksize() == num_bins: + Rebin(InputWorkspace=workspace_name, + OutputWorkspace=workspace_name, + Params=rebin_string) + else: + Rebin(InputWorkspace=workspace_name, + OutputWorkspace=workspace_name, + Params=multi_frame_rebin_string) + else: + # Regular data + Rebin(InputWorkspace=workspace_name, + OutputWorkspace=workspace_name, + Params=rebin_string) + else: + try: + # If user does not want to rebin then just ensure uniform binning across spectra + RebinToWorkspace(WorkspaceToRebin=workspace_name, + WorkspaceToMatch=workspace_name, + OutputWorkspace=workspace_name) + except RuntimeError: + logger.warning('Rebinning failed, will try to continue anyway.') + +#------------------------------------------------------------------------------- diff --git a/Code/Mantid/scripts/Inelastic/inelastic_indirect_reducer.py b/Code/Mantid/scripts/Inelastic/inelastic_indirect_reducer.py deleted file mode 100644 index 26702c6cd4bf..000000000000 --- a/Code/Mantid/scripts/Inelastic/inelastic_indirect_reducer.py +++ /dev/null @@ -1,146 +0,0 @@ -## IndirectEnergyConversionReducer class -from mantid.simpleapi import * - -from msg_reducer import MSGReducer -import inelastic_indirect_reduction_steps as steps - -class IndirectReducer(MSGReducer): - """Reducer class for Inelastic Indirect Spectroscopy. - - Example for use: - >> import inelastic_indirect_reducer as iir - >> reducer = iir.IndirectReducer() - >> reducer.set_instrument_name('IRIS') - >> reducer.set_parameter_file('IRIS_graphite_002_Parameters.xml') - >> reducer.set_detector_range(2,52) - >> reducer.append_data_file('IRS21360.raw') - >> reducer.reduce() - - Will perform the same steps as the ConvertToEnergy interface does on the - default settings.
- """ - - _grouping_policy = None - _calibration_workspace = None - _background_start = None - _background_end = None - _detailed_balance_temp = None - _rename_result = True - _save_to_cm_1 = False - _scale_factor = None - - def __init__(self): - """ - """ - super(IndirectReducer, self).__init__() - self._grouping_policy = None - self._calibration_workspace = None - self._background_start = None - self._background_end = None - self._detailed_balance_temp = None - self._rename_result = True - self._scale_factor = None - - def _setup_steps(self): - """**NB: This function is run automatically by the base reducer class - and so does not require user interaction.** - Setup the steps for the reduction. Please refer to the individual - steps for details on their operation. - """ - - step = steps.IdentifyBadDetectors(MultipleFrames=self._multiple_frames) - self.append_step(step) - - # "HandleMonitor" converts the monitor to Wavelength, possibly Unwraps - step = steps.HandleMonitor(MultipleFrames=self._multiple_frames) - self.append_step(step) - - # "BackgroundOperations" just does a CalculateFlatBackground at the moment, - # will be extended for SNS stuff - if (self._background_start is not None and - self._background_end is not None): - step = steps.BackgroundOperations( - MultipleFrames=self._multiple_frames) - step.set_range(self._background_start, self._background_end) - self.append_step(step) - - # "ApplyCalibration" divides the workspace by the calibration workspace - if self._calibration_workspace is not None: - step = steps.ApplyCalibration() - step.set_is_multiple_frames(self._multiple_frames) - step.set_calib_workspace(self._calibration_workspace) - self.append_step(step) - - # "CorrectByMonitor" converts the data into Wavelength, then divides by - # the monitor workspace. - step = steps.CorrectByMonitor(MultipleFrames=self._multiple_frames) - self.append_step(step) - - # "ConvertToEnergy" runs ConvertUnits to DeltaE, CorrectKiKf, and also - # Rebin if a rebin string has been specified. - step = steps.ConvertToEnergy(MultipleFrames=self._multiple_frames) - step.set_rebin_string(self._rebin_string) - self.append_step(step) - - if self._detailed_balance_temp is not None: - step = steps.DetailedBalance(MultipleFrames=self._multiple_frames) - step.set_temperature(self._detailed_balance_temp) - self.append_step(step) - - # Multiplies the scale by the factor specified. - if self._scale_factor is not None: - step = steps.Scaling(MultipleFrames=self._multiple_frames) - step.set_scale_factor(self._scale_factor) - self.append_step(step) - - step = steps.Grouping(MultipleFrames=self._multiple_frames) - step.set_grouping_policy(self._grouping_policy) - self.append_step(step) - - # "FoldData" puts workspaces that have been chopped back together. - if self._multiple_frames: - if self._fold_multiple_frames: - self.append_step(steps.FoldData()) - else: - return - - step = steps.ConvertToCm1(MultipleFrames=self._multiple_frames) - step.set_save_to_cm_1(self._save_to_cm_1) - self.append_step(step) - - # The "SaveItem" step saves the files in the requested formats. 
- if len(self._save_formats) > 0: - step = steps.SaveItem() - step.set_formats(self._save_formats) - step.set_save_to_cm_1(self._save_to_cm_1) - self.append_step(step) - - if self._rename_result: - step = steps.Naming() - self.append_step(step) - - def set_grouping_policy(self, policy): - self._grouping_policy = policy - - def set_save_to_cm_1(self, save_to_cm_1): - self._save_to_cm_1 = save_to_cm_1 - - def set_calibration_workspace(self, workspace): - if not mtd.doesExist(workspace): - raise ValueError("Selected calibration workspace not found.") - self._calibration_workspace = workspace - - def set_background(self, start, end): - self._background_start = float(start) - self._background_end = float(end) - - def set_detailed_balance(self, temp): - self._detailed_balance_temp = float(temp) - - def set_scale_factor(self, scaleFactor): - self._scale_factor = float(scaleFactor) - - def set_rename(self, value): - if not isinstance(value, bool): - raise TypeError("value must be either True or False (boolean)") - self._rename_result = value diff --git a/Code/Mantid/scripts/Inelastic/inelastic_indirect_reduction_steps.py b/Code/Mantid/scripts/Inelastic/inelastic_indirect_reduction_steps.py deleted file mode 100644 index 70bec0c356c9..000000000000 --- a/Code/Mantid/scripts/Inelastic/inelastic_indirect_reduction_steps.py +++ /dev/null @@ -1,1010 +0,0 @@ -#pylint: disable=invalid-name,no-init -from reduction.reducer import ReductionStep - -import mantid -from mantid import config -from mantid.simpleapi import * -from mantid.api import IEventWorkspace - -import string -import os - - -class LoadData(ReductionStep): - """Handles the loading of the data for Indirect instruments. The summing - of input workspaces is handled in this routine, as well as the identifying - of detectors that require masking. - - This step will use the following parameters from the Instrument's parameter - file: - - * Workflow.ChopDataIfGreaterThan - if this parameter is specified on the - instrument, then the raw data will be split into multiple frames if - the largest TOF (X) value in the workspace is greater than the provided - value. - """ - - _multiple_frames = False - _sum = False - _load_logs = False - _monitor_index = None - _detector_range_start = None - _detector_range_end = None - _masking_detectors = [] - _parameter_file = None - _data_files = {} - _extra_load_opts = {} - _contains_event_data = False - _reducer = None - - def __init__(self): - """Initialise the ReductionStep. Constructor should set the initial - parameters for the step. - """ - super(LoadData, self).__init__() - self._sum = False - self._load_logs = False - self._multiple_frames = False - self._monitor_index = None - self._detector_range_start = None - self._detector_range_end = None - self._parameter_file = None - self._data_files = {} - - def execute(self, reducer, file_ws): - """Loads the data. - """ - self._reducer = reducer - wsname = '' - - for output_ws, filename in self._data_files.iteritems(): - try: - self._load_single_file(filename,output_ws) - if wsname == "": - wsname = output_ws - except RuntimeError, exc: - logger.warning("Error loading '%s': %s. 
File skipped" % (filename, str(exc))) - continue - - if ( self._sum ) and ( len(self._data_files) > 1 ): - ## Sum files - merges = [] - if self._multiple_frames : - self._sum_chopped(wsname) - else: - self._sum_regular(wsname) - ## Need to adjust the reducer's list of workspaces - self._data_files = {} - self._data_files[wsname] = wsname - - def set_load_logs(self, value): - self._load_logs = value - - def set_sum(self, value): - self._sum = value - - def set_parameter_file(self, value): - self._parameter_file = value - - def set_detector_range(self, start, end): - self._detector_range_start = start - self._detector_range_end = end - - def set_extra_load_opts(self, opts): - self._extra_load_opts = opts - - def set_ws_list(self, value): - self._data_files = value - - def get_ws_list(self): - return self._data_files - - def contains_event_data(self): - return self._contains_event_data - - def _load_single_file(self, filename, output_ws): - logger.notice("Loading file %s" % filename) - - self._load_data(filename, output_ws) - - if type(mtd[output_ws]) is IEventWorkspace: - self._contains_event_data = True - - inst_name = mtd[output_ws].getInstrument().getName() - if inst_name == 'BASIS': - ModeratorTzeroLinear(InputWorkspace=output_ws,OutputWorkspace= output_ws) - basis_mask = mtd[output_ws].getInstrument().getStringParameter( - 'Workflow.MaskFile')[0] - # Quick hack for older BASIS files that only have one side - #if (mtd[file].getRun()['run_number'] < 16693): - # basis_mask = "BASIS_Mask_before_16693.xml" - basis_mask_filename = os.path.join(config.getString('maskFiles.directory')\ - , basis_mask) - if os.path.isfile(basis_mask_filename): - LoadMask(Instrument="BASIS", OutputWorkspace="__basis_mask",\ - InputFile=basis_mask_filename) - MaskDetectors(Workspace=output_ws, MaskedWorkspace="__basis_mask") - else: - logger.notice("Couldn't find specified mask file : " + str(basis_mask_filename)) - - if self._parameter_file != None: - LoadParameterFile(Workspace=output_ws,Filename= self._parameter_file) - - self._monitor_index = self._reducer._get_monitor_index(mtd[output_ws]) - - if self._require_chop_data(output_ws): - ChopData(InputWorkspace=output_ws,OutputWorkspace= output_ws,Step= 20000.0,NChops= 5, IntegrationRangeLower=5000.0,\ - IntegrationRangeUpper=10000.0,\ - MonitorWorkspaceIndex=self._monitor_index) - self._multiple_frames = True - else: - self._multiple_frames = False - - if self._multiple_frames : - workspaces = mtd[output_ws].getNames() - else: - workspaces = [output_ws] - - logger.debug('self._monitor_index = ' + str(self._monitor_index)) - - for ws in workspaces: - if isinstance(mtd[ws],mantid.api.IEventWorkspace): - LoadNexusMonitors(Filename=self._data_files[output_ws], - OutputWorkspace= ws+'_mon') - else: - ## Extract Monitor Spectrum - ExtractSingleSpectrum(InputWorkspace=ws,OutputWorkspace= ws+'_mon',WorkspaceIndex= self._monitor_index) - - if self._detector_range_start < 0 or self._detector_range_end > mtd[ws].getNumberHistograms(): - raise ValueError("Range %d - %d is not a valid detector range." 
% (self._detector_range_start, self._detector_range_end)) - - ## Crop the workspace to remove uninteresting detectors - CropWorkspace(InputWorkspace=ws,OutputWorkspace= ws,\ - StartWorkspaceIndex=self._detector_range_start,\ - EndWorkspaceIndex=self._detector_range_end) - - def _load_data(self, filename, output_ws): - if self._parameter_file is not None and "VESUVIO" in self._parameter_file: - loaded_ws = LoadVesuvio(Filename=filename, OutputWorkspace=output_ws, SpectrumList="1-198", **self._extra_load_opts) - else: - # loaded_ws = Load(Filename=filename, OutputWorkspace=output_ws, LoadLogFiles=False, **self._extra_load_opts) - if self._load_logs == True: - loaded_ws = Load(Filename=filename, OutputWorkspace=output_ws, LoadLogFiles=True, **self._extra_load_opts) - logger.notice("Loaded sample logs") - else: - loaded_ws = Load(Filename=filename, OutputWorkspace=output_ws, LoadLogFiles=False, **self._extra_load_opts) - - def _sum_regular(self, wsname): - merges = [[], []] - run_numbers = [] - for ws in self._data_files: - merges[0].append(ws) - merges[1].append(ws + '_mon') - run_numbers.append(str(mtd[ws].getRunNumber())) - - MergeRuns(InputWorkspaces=','.join(merges[0]), OutputWorkspace=wsname) - MergeRuns(InputWorkspaces=','.join(merges[1]), OutputWorkspace=wsname + '_mon') - - AddSampleLog(Workspace=wsname, LogName='multi_run_numbers', LogType='String', - LogText=','.join(run_numbers)) - - for n in range(1, len(merges[0])): - DeleteWorkspace(Workspace=merges[0][n]) - DeleteWorkspace(Workspace=merges[1][n]) - - factor = 1.0 / len(self._data_files) - Scale(InputWorkspace=wsname, OutputWorkspace=wsname, Factor=factor) - Scale(InputWorkspace=wsname + '_mon', OutputWorkspace=wsname + '_mon', Factor=factor) - - def _sum_chopped(self, wsname): - merges = [] - nmerges = len(mtd[wsname].getNames()) - - for n in range(0, nmerges): - merges.append([]) - merges.append([]) - - for file in self._data_files: - try: - merges[2 * n].append(mtd[file].getNames()[n]) - merges[2 * n + 1].append(mtd[file].getNames()[n] + '_mon') - except AttributeError: - if n == 0: - merges[0].append(file) - merges[1].append(file + '_mon') - - for merge in merges: - MergeRuns(InputWorkspaces=','.join(merge), OutputWorkspace=merge[0]) - factor = 1.0 / len(merge) - Scale(InputWorkspace=merge[0], OutputWorkspace=merge[0], Factor=factor) - - for n in range(1, len(merge)): - DeleteWorkspace(Workspace=merge[n]) - - def _require_chop_data(self, ws): - try: - cdigt = mtd[ws].getInstrument().getNumberParameter( - 'Workflow.ChopDataIfGreaterThan')[0] - except IndexError: - return False - if mtd[ws].readX(0)[mtd[ws].blocksize()] > cdigt : - return True - else: - return False - - def is_multiple_frames(self): - return self._multiple_frames - -#-------------------------------------------------------------------------------------------------- - -class IdentifyBadDetectors(ReductionStep): - """ Identifies bad detectors in a workspace and creates a list of - detectors to mask. This step will set the masking detectors property on - the reducer object passed to execute. This uses the IdentifyNoisyDetectors algorithm. - - The step will use the following parameters on the workspace: - - * Workflow.Masking - identifies the method (if any) on which detectors that - are to be masked should be identified. 
- """ - - _masking_detectors = [] - - def __init__(self, MultipleFrames=False): - super(IdentifyBadDetectors, self).__init__() - self._multiple_frames = MultipleFrames - self._background_start = None - self._background_end = None - - def execute(self, reducer, file_ws): - - if self._multiple_frames: - try: - workspaces = mtd[file_ws].getNames() - except AttributeError: - workspaces = [file_ws] - else: - workspaces = [file_ws] - - try: - msk = mtd[workspaces[0]].getInstrument().getStringParameter('Workflow.Masking')[0] - except IndexError: - msk = 'None' - - if msk != 'IdentifyNoisyDetectors': - return - - temp_ws_mask = '__temp_ws_mask' - IdentifyNoisyDetectors(InputWorkspace=workspaces[0], OutputWorkspace=temp_ws_mask) - ws = mtd[temp_ws_mask] - nhist = ws.getNumberHistograms() - - for i in range(0, nhist): - if ws.readY(i)[0] == 0.0: - self._masking_detectors.append(i) - DeleteWorkspace(Workspace=temp_ws_mask) - - #set the detector masks for the workspace - reducer._masking_detectors[file_ws] = self._masking_detectors - - def get_mask_list(self): - return self._masking_detectors - -#-------------------------------------------------------------------------------------------------- - -class BackgroundOperations(ReductionStep): - """Removes, if requested, a background from the detectors data in TOF - units. Currently only uses the CalculateFlatBackground algorithm, more options - to cover SNS use to be added at a later point. - """ - _multiple_frames = False - _background_start = None - _background_end = None - - def __init__(self, MultipleFrames=False): - super(BackgroundOperations, self).__init__() - self._multiple_frames = MultipleFrames - self._background_start = None - self._background_end = None - - def execute(self, reducer, file_ws): - if self._multiple_frames : - try: - workspaces = mtd[file_ws].getNames() - except AttributeError: - workspaces = [file_ws] - else: - workspaces = [file_ws] - - for ws in workspaces: - ConvertToDistribution(Workspace=ws) - CalculateFlatBackground(InputWorkspace=ws,OutputWorkspace= ws,StartX= self._background_start,\ - EndX=self._background_end, Mode='Mean') - ConvertFromDistribution(Workspace=ws) - - def set_range(self, start, end): - self._background_start = start - self._background_end = end - -class ApplyCalibration(ReductionStep): - """Applies a calibration workspace to the data. - """ - - _multiple_frames = False - _calib_workspace = None - - def __init__(self): - super(ApplyCalibration, self).__init__() - self._multiple_frames = False - self._calib_workspace = None - - def execute(self, reducer, file_ws): - if self._calib_workspace is None: # No calibration workspace set - return - if self._multiple_frames : - try: - workspaces = mtd[file_ws].getNames() - except AttributeError: - workspaces = [file_ws] - else: - workspaces = [file_ws] - - for ws in workspaces: - Divide(LHSWorkspace=ws,RHSWorkspace= self._calib_workspace,OutputWorkspace= ws) - - def set_is_multiple_frames(self, value): - self._multiple_frames = value - - def set_calib_workspace(self, value): - self._calib_workspace = value - -class HandleMonitor(ReductionStep): - """Handles the montior for the reduction of inelastic indirect data. - - This uses the following parameters from the instrument: - * Workflow.Monitor1-Area - * Workflow.Monitor1-Thickness - * Workflow.Monitor1-ScalingFactor - * Workflow.UnwrapMonitor - """ - _multiple_frames = False - - def __init__(self, MultipleFrames=False): - """Constructor for HandleMonitor routine. 
- """ - super(HandleMonitor, self).__init__() - self._multiple_frames = MultipleFrames - - def execute(self, reducer, file_ws): - """Does everything we want to with the Monitor. - """ - if self._multiple_frames : - try: - workspaces = mtd[file_ws].getNames() - except AttributeError: - workspaces = [file_ws] - else: - workspaces = [file_ws] - - for ws in workspaces: - monitor = ws+'_mon' - self._rebin_monitor(ws) - if self._need_to_unwrap(ws): - self._unwrap_monitor(ws) - else: - ConvertUnits(InputWorkspace=monitor,OutputWorkspace= monitor,Target= 'Wavelength') - self._monitor_efficiency(monitor) - self._scale_monitor(monitor) - - def _rebin_monitor(self, ws): - """For some instruments (e.g. BASIS) the monitor binning is too - fine and needs to be rebinned. This is controlled - by the 'Workflow.Monitor.RebinStep' parameter set on the - instrument. If no parameter is present, no rebinning will occur. - """ - try: - stepsize = mtd[ws].getInstrument().getNumberParameter('Workflow.Monitor.RebinStep')[0] - except IndexError: - logger.notice("Monitor is not being rebinned.") - else: - Rebin(InputWorkspace=ws+'_mon',OutputWorkspace= ws+'_mon',Params= stepsize) - - def _need_to_unwrap(self, ws): - try: - unwrap = mtd[ws].getInstrument().getStringParameter( - 'Workflow.UnwrapMonitor')[0] - except IndexError: - return False # Default it to not unwrap - if unwrap == 'Never' : - return False - elif unwrap == 'Always' : - return True - elif unwrap == 'BaseOnTimeRegime' : - SpecMon = mtd[ws+'_mon'].readX(0)[0] - SpecDet = mtd[ws].readX(0)[0] - if SpecMon == SpecDet : - return True - else: - return False - else: - return False - - def _unwrap_monitor(self, ws): - l_ref = self._get_reference_length(ws, 0) - monitor = ws+'_mon' - unwrapped_ws, join = UnwrapMonitor(InputWorkspace=monitor, OutputWorkspace=monitor, LRef=l_ref) - RemoveBins(InputWorkspace=monitor,OutputWorkspace= monitor,XMin= join-0.001,XMax= join+0.001,\ - Interpolation='Linear') - try: - FFTSmooth(InputWorkspace=monitor,OutputWorkspace=monitor,WorkspaceIndex=0) - except ValueError: - raise ValueError("Indirect Energy Conversion does not support uneven bin widths.") - - def _get_reference_length(self, ws, index): - workspace = mtd[ws] - instrument = workspace.getInstrument() - sample = instrument.getSample() - source = instrument.getSource() - detector = workspace.getDetector(index) - sample_to_source = sample.getPos() - source.getPos() - r = detector.getDistance(sample) - x = sample_to_source.getZ() - result = x + r - return result - - def _monitor_efficiency(self, monitor): - inst = mtd[monitor].getInstrument() - try: - montiorStr = 'Workflow.Monitor1' - area = inst.getNumberParameter(montiorStr+'-Area')[0] - thickness = inst.getNumberParameter(montiorStr+'-Thickness')[0] - attenuation= inst.getNumberParameter(montiorStr+'-Attenuation')[0] - except IndexError: - raise ValueError('Unable to retrieve monitor thickness, area and '\ - 'attenuation from Instrument Parameter file.') - else: - if area == -1 or thickness == -1 or attenuation == -1: - return - OneMinusExponentialCor(InputWorkspace=monitor,OutputWorkspace= monitor,C= (attenuation * thickness),C1= area) - - def _scale_monitor(self, monitor): - """Some instruments wish to scale their data. Doing this at the - monitor is the most efficient way to do this. This is controlled - by the 'Workflow.MonitorScalingFactor' parameter set on the - instrument. 
- """ - try: - factor = mtd[monitor].getInstrument().getNumberParameter( - 'Workflow.Monitor1-ScalingFactor')[0] - except IndexError: - print "Monitor is not being scaled." - else: - if factor != 1.0: - Scale(InputWorkspace=monitor,OutputWorkspace= monitor,Factor= ( 1.0 / factor ),Operation= 'Multiply') - -class CorrectByMonitor(ReductionStep): - """ - """ - - _multiple_frames = False - _emode = "Indirect" - - def __init__(self, MultipleFrames=False, EMode="Indirect"): - super(CorrectByMonitor, self).__init__() - self._multiple_frames = MultipleFrames - self._emode = EMode - - def execute(self, reducer, file_ws): - if self._multiple_frames : - try: - workspaces = mtd[file_ws].getNames() - except AttributeError: - workspaces = [file_ws] - else: - workspaces = [file_ws] - - for ws in workspaces: - ConvertUnits(InputWorkspace=ws,OutputWorkspace= ws,Target= "Wavelength",EMode= self._emode) - RebinToWorkspace(WorkspaceToRebin=ws,WorkspaceToMatch= ws+'_mon',OutputWorkspace= ws) - Divide(LHSWorkspace=ws,RHSWorkspace= ws+'_mon',OutputWorkspace= ws) - DeleteWorkspace(Workspace=ws+'_mon') - - def set_emode(self, emode): - """ - """ - self._emode = emode - -class FoldData(ReductionStep): - _result_workspaces = [] - - def __init__(self): - super(FoldData, self).__init__() - self._result_workspaces = [] - - def execute(self, reducer, file_ws): - try: - wsgroup = mtd[file_ws].getNames() - except AttributeError: - return # Not a grouped workspace - ws = file_ws+'_merged' - MergeRuns(InputWorkspaces=','.join(wsgroup),OutputWorkspace= ws) - scaling = self._create_scaling_workspace(wsgroup, ws) - for workspace in wsgroup: - DeleteWorkspace(Workspace=workspace) - Divide(LHSWorkspace=ws,RHSWorkspace= scaling,OutputWorkspace= ws) - DeleteWorkspace(Workspace=scaling) - RenameWorkspace(InputWorkspace=ws,OutputWorkspace= file_ws) - self._result_workspaces.append(file_ws) - - def get_result_workspaces(self): - return self._result_workspaces - - def _create_scaling_workspace(self, wsgroup, merged): - wsname = '__scaling' - unit = '' - ranges = [] - lowest = 0 - highest = 0 - for ws in wsgroup: - if unit == '' : - unit = mtd[ws].getAxis(0).getUnit().unitID() - low = mtd[ws].dataX(0)[0] - high = mtd[ws].dataX(0)[mtd[ws].blocksize()-1] - ranges.append([low, high]) - if low < lowest: lowest = low - if high > highest: highest = high - dataX = mtd[merged].readX(0) - dataY = [] - dataE = [] - for i in range(0, mtd[merged].blocksize()): - dataE.append(0.0) - dataY.append(self._ws_in_range(ranges, dataX[i])) - CreateWorkspace(OutputWorkspace=wsname,DataX= dataX,DataY= dataY,DataE= dataE, UnitX=unit) - return wsname - - def _ws_in_range(self, ranges, xval): - result = 0 - for range in ranges: - if xval >= range[0] and xval <= range[1] : result += 1 - return result - -class ConvertToCm1(ReductionStep): - """ - Converts the workspaces to cm-1. 
- """ - - _multiple_frames = False - _save_to_cm_1 = False - - def __init__(self, MultipleFrames=False): - super(ConvertToCm1, self).__init__() - self._multiple_frames = MultipleFrames - - def execute(self, reducer, file_ws): - - if self._save_to_cm_1 == False: - return - - if self._multiple_frames : - try: - workspaceNames = mtd[file_ws].getNames() - except AttributeError: - workspaceNames = [file_ws] - else: - workspaceNames = [file_ws] - - for wsName in workspaceNames: - try: - ws = mtd[wsName] - except: - continue - ConvertUnits(InputWorkspace=ws,OutputWorkspace=ws,EMode='Indirect',Target='DeltaE_inWavenumber') - - def set_save_to_cm_1(self, save_to_cm_1): - self._save_to_cm_1 = save_to_cm_1 - -class ConvertToEnergy(ReductionStep): - """ - """ - _rebin_string = None - _multiple_frames = False - - def __init__(self, MultipleFrames=False): - super(ConvertToEnergy, self).__init__() - self._rebin_string = None - self._multiple_frames = MultipleFrames - - def execute(self, reducer, file_ws): - if self._multiple_frames : - try: - workspaces = mtd[file_ws].getNames() - except AttributeError: - workspaces = [file_ws] - else: - workspaces = [file_ws] - - for ws in workspaces: - ConvertUnits(InputWorkspace=ws,OutputWorkspace= ws,Target= 'DeltaE',EMode= 'Indirect') - CorrectKiKf(InputWorkspace=ws,OutputWorkspace= ws,EMode= 'Indirect') - if self._rebin_string is not None: - if not self._multiple_frames: - Rebin(InputWorkspace=ws,OutputWorkspace= ws,Params= self._rebin_string) - else: - try: - # Rebin whole workspace to first spectrum to allow grouping to proceed - RebinToWorkspace(WorkspaceToRebin=ws,WorkspaceToMatch=ws, - OutputWorkspace=ws) - except Exception: - logger.information("RebinToWorkspace failed. Attempting to continue without it.") - - if self._multiple_frames and self._rebin_string is not None: - self._rebin_mf(workspaces) - - def set_rebin_string(self, value): - if value is not None: - self._rebin_string = value - - def _rebin_mf(self, workspaces): - nbin = 0 - rstwo = self._rebin_string.split(",") - if len(rstwo) >= 5: - rstwo = ",".join(rstwo[2:]) - else: - rstwo = self._rebin_string - for ws in workspaces: - nbins = mtd[ws].blocksize() - if nbins > nbin: nbin = nbins - for ws in workspaces: - if mtd[ws].blocksize() == nbin: - Rebin(InputWorkspace=ws,OutputWorkspace= ws,Params= self._rebin_string) - else: - Rebin(InputWorkspace=ws,OutputWorkspace= ws,Params= rstwo) - -class RebinToFirstSpectrum(ReductionStep): - """ - A simple step to rebin the input workspace to match - the first spectrum of itself - """ - - def execute(self, reducer, inputworkspace): - RebinToWorkspace(WorkspaceToRebin=inputworkspace,WorkspaceToMatch=inputworkspace, - OutputWorkspace=inputworkspace) - -class NormaliseToUnityStep(ReductionStep): - """ - A simple step to normalise a workspace to a given factor - """ - _factor = None - _peak_min = None - _peak_max = None - - def execute(self, reducer, ws): - number_historgrams = mtd[ws].getNumberHistograms() - Integration(InputWorkspace=ws, OutputWorkspace=ws, RangeLower=self._peak_min, RangeUpper= self._peak_max) - ws_mask, num_zero_spectra = FindDetectorsOutsideLimits(InputWorkspace=ws, OutputWorkspace='__temp_ws_mask') - DeleteWorkspace(ws_mask) - - tempSum = SumSpectra(InputWorkspace=ws, OutputWorkspace='__temp_sum') - total = tempSum.readY(0)[0] - DeleteWorkspace(tempSum) - - if self._factor is None: - self._factor = 1 / ( total / (number_historgrams - num_zero_spectra) ) - - Scale(InputWorkspace=ws, OutputWorkspace=ws, Factor=self._factor, 
-
-class DetailedBalance(ReductionStep):
-    """Applies a detailed balance correction to the data, using the sample
-    temperature set via set_temperature().
-    """
-    _temp = None
-    _multiple_frames = False
-
-    def __init__(self, MultipleFrames=False):
-        super(DetailedBalance, self).__init__()
-        self._temp = None
-        self._multiple_frames = MultipleFrames
-
-    def execute(self, reducer, file_ws):
-        if self._temp is None:
-            return
-
-        correction = 11.606 / (2 * self._temp)
-
-        if self._multiple_frames:
-            workspaces = mtd[file_ws].getNames()
-        else:
-            workspaces = [file_ws]
-
-        for ws in workspaces:
-            ExponentialCorrection(InputWorkspace=ws, OutputWorkspace=ws,
-                                  C0=1.0, C1=correction, Operation="Multiply")
-
-    def set_temperature(self, temp):
-        self._temp = temp
-
-class Scaling(ReductionStep):
-    """Multiplies the data by a user supplied scale factor.
-    """
-    _scale_factor = None
-    _multiple_frames = False
-
-    def __init__(self, MultipleFrames=False):
-        super(Scaling, self).__init__()
-        self._scale_factor = None
-        self._multiple_frames = MultipleFrames
-
-    def execute(self, reducer, file_ws):
-        if self._scale_factor is None: # Scale factor is the default value, 1.0
-            return
-
-        if self._multiple_frames:
-            workspaces = mtd[file_ws].getNames()
-        else:
-            workspaces = [file_ws]
-
-        for ws in workspaces:
-            Scale(InputWorkspace=ws, OutputWorkspace=ws, Factor=self._scale_factor, Operation="Multiply")
-
-    def set_scale_factor(self, scaleFactor):
-        self._scale_factor = scaleFactor
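
The 11.606 in DetailedBalance is approximately 1/k_B in kelvin per meV, so the C1 passed to ExponentialCorrection is 1/(2*k_B*T) in 1/meV. A quick magnitude check of the resulting per-bin factor, assuming ExponentialCorrection's documented C0*exp(-C1*x) form; the temperature and energies below are illustrative only:

import math

# Sketch: size of the detailed balance factor for a 300 K sample.
temperature_k = 300.0
correction = 11.606 / (2 * temperature_k)   # same expression as the step above

# With C0=1, C1=correction and Operation='Multiply', each bin at energy
# transfer E (meV) is scaled by exp(-correction * E):
for energy_mev in (1.0, 10.0, 50.0):
    print(energy_mev, math.exp(-correction * energy_mev))
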
-
-class Grouping(ReductionStep):
-    """This ReductionStep handles the grouping and renaming of the final
-    workspace. In most cases, this will require a Rebin on the data. The option
-    to do this is given in the ConvertToEnergy step.
-
-    The step will use the following parameters on the workspace:
-    * 'Workflow.GroupingMethod' - if this is equal to 'File' then we look for a
-      parameter called:
-    * 'Workflow.GroupingFile' - the name of a file which contains the grouping
-      of detectors for the instrument.
-
-    If a masking list has been set using set_mask_list(), then the workspace
-    indices listed will not be included in the group (if any grouping is in
-    fact performed).
-    """
-    _grouping_policy = None
-    _masking_detectors = []
-    _result_workspaces = []
-    _multiple_frames = False
-
-    def __init__(self, MultipleFrames=False):
-        super(Grouping, self).__init__()
-        self._grouping_policy = None
-        self._masking_detectors = []
-        self._result_workspaces = []
-        self._multiple_frames = MultipleFrames
-
-    def execute(self, reducer, file_ws):
-        if self._multiple_frames:
-            try:
-                workspaces = mtd[file_ws].getNames()
-            except AttributeError:
-                workspaces = [file_ws]
-        else:
-            workspaces = [file_ws]
-
-        # Set the detector mask for this workspace
-        if file_ws in reducer._masking_detectors:
-            self._masking_detectors = reducer._masking_detectors[file_ws]
-
-        for ws in workspaces:
-            # If a grouping policy has not been set then try to get one from the IPF
-            if self._grouping_policy is None:
-                try:
-                    group = mtd[ws].getInstrument().getStringParameter('Workflow.GroupingMethod')[0]
-                except IndexError:
-                    group = 'User'
-
-                if group == 'File':
-                    self._grouping_policy = mtd[ws].getInstrument().getStringParameter('Workflow.GroupingFile')[0]
-                else:
-                    self._grouping_policy = group
-
-            self._result_workspaces.append(self._group_data(ws))
-
-    def set_grouping_policy(self, value):
-        self._grouping_policy = value
-
-    def get_result_workspaces(self):
-        return self._result_workspaces
-
-    def _group_data(self, workspace):
-        grouping = self._grouping_policy
-        if grouping == 'Individual' or grouping is None:
-            return workspace
-        elif grouping == 'All':
-            nhist = mtd[workspace].getNumberHistograms()
-            wslist = []
-            for i in range(0, nhist):
-                if i not in self._masking_detectors:
-                    wslist.append(i)
-            GroupDetectors(InputWorkspace=workspace, OutputWorkspace=workspace,
-                           WorkspaceIndexList=wslist, Behaviour='Average')
-        else:
-            # We may have either a workspace name or a mapping file name here
-            grouping_workspace = None
-            grouping_filename = None
-
-            # See if it is a workspace in the ADS
-            # If not, assume it is a mapping file
-            try:
-                grouping_workspace = mtd[grouping]
-            except KeyError:
-                logger.notice("Cannot find group workspace " + grouping + ", attempting to find as file")
-
-                # See if it is an absolute path
-                # Otherwise check in the default group files directory
-                if os.path.isfile(grouping):
-                    grouping_filename = grouping
-                else:
-                    grouping_filename = os.path.join(config.getString('groupingFiles.directory'), grouping)
-
-            # Mask detectors before grouping if we need to
-            if len(self._masking_detectors) > 0:
-                MaskDetectors(Workspace=workspace, WorkspaceIndexList=self._masking_detectors)
-
-            # Run GroupDetectors with a workspace if we have one
-            # Otherwise try to run it with a mapping file
-            if grouping_workspace is not None:
-                GroupDetectors(InputWorkspace=workspace, OutputWorkspace=workspace,
-                               CopyGroupingFromWorkspace=grouping_workspace, Behaviour='Average')
-            elif os.path.isfile(grouping_filename):
-                GroupDetectors(InputWorkspace=workspace, OutputWorkspace=workspace,
-                               MapFile=grouping_filename, Behaviour='Average')
-
-        return workspace
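
For the 'All' policy the step simply builds a workspace index list that skips masked detectors and hands it to GroupDetectors. A minimal sketch of that list construction, with a made-up histogram count and masked indices:

# Sketch: the index list the 'All' policy passes to GroupDetectors.
number_histograms = 10   # illustrative detector count
masked = {2, 7}          # illustrative masked workspace indices

ws_index_list = [i for i in range(number_histograms) if i not in masked]
print(ws_index_list)  # [0, 1, 3, 4, 5, 6, 8, 9]
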
-
-class SaveItem(ReductionStep):
-    """This routine will save a given workspace in the selected file formats.
-    The currently recognised formats are:
-    * 'spe' - SPE ASCII format
-    * 'nxs' - NeXus compressed file format
-    * 'nxspe' - NeXus SPE file format
-    * 'ascii' - Comma Separated Values (file extension '.dat')
-    * 'gss' - GSAS file format (N.B.: units will be converted to Time of
-      Flight if not already in that unit for saving in this format)
-    * 'davegrp' - DAVE grouped ASCII format
-    """
-    _formats = []
-    _save_to_cm_1 = False
-
-    def __init__(self):
-        super(SaveItem, self).__init__()
-        self._formats = []
-
-    def execute(self, reducer, file_ws):
-        naming = Naming()
-        filename = naming.get_ws_name(file_ws, reducer)
-        for file_format in self._formats:
-            if file_format == 'spe':
-                SaveSPE(InputWorkspace=file_ws, Filename=filename + '.spe')
-            elif file_format == 'nxs':
-                SaveNexusProcessed(InputWorkspace=file_ws, Filename=filename + '.nxs')
-            elif file_format == 'nxspe':
-                SaveNXSPE(InputWorkspace=file_ws, Filename=filename + '.nxspe')
-            elif file_format == 'ascii':
-                # Version 1 of SaveAscii produces output that works better with Excel/Origin.
-                # For some reason this has to be done with an algorithm object; using the
-                # function wrapper with Version did not change the version that was run.
-                saveAsciiAlg = mantid.api.AlgorithmManager.createUnmanaged('SaveAscii', 1)
-                saveAsciiAlg.initialize()
-                saveAsciiAlg.setProperty('InputWorkspace', file_ws)
-                saveAsciiAlg.setProperty('Filename', filename + '.dat')
-                saveAsciiAlg.execute()
-
-            elif file_format == 'gss':
-                ConvertUnits(InputWorkspace=file_ws, OutputWorkspace="__save_item_temp", Target="TOF")
-                SaveGSS(InputWorkspace="__save_item_temp", Filename=filename + ".gss")
-                DeleteWorkspace(Workspace="__save_item_temp")
-            elif file_format == 'aclimax':
-                if not self._save_to_cm_1:
-                    bins = '3, -0.005, 500'    # meV
-                else:
-                    bins = '24, -0.005, 4000'  # cm-1
-                Rebin(InputWorkspace=file_ws, OutputWorkspace=file_ws + '_aclimax_save_temp', Params=bins)
-                SaveAscii(InputWorkspace=file_ws + '_aclimax_save_temp', Filename=filename + '_aclimax.dat', Separator='Tab')
-                DeleteWorkspace(Workspace=file_ws + '_aclimax_save_temp')
-            elif file_format == 'davegrp':
-                ConvertSpectrumAxis(InputWorkspace=file_ws, OutputWorkspace=file_ws + '_davegrp_save_temp', Target='ElasticQ', EMode='Indirect')
-                SaveDaveGrp(InputWorkspace=file_ws + '_davegrp_save_temp', Filename=filename + '.grp')
-                DeleteWorkspace(Workspace=file_ws + '_davegrp_save_temp')
-
-    def set_formats(self, formats):
-        self._formats = formats
-
-    def set_save_to_cm_1(self, save_to_cm_1):
-        self._save_to_cm_1 = save_to_cm_1
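
Typical use of the save step is just a matter of picking the extensions before running it. A hedged usage sketch; the workspace name and the reducer instance here are placeholders, not values from this changeset:

# Sketch: configuring the save step; 'irs12345_graphite002_red' and
# 'reducer' are made-up placeholders.
save_step = SaveItem()
save_step.set_formats(['nxs', 'ascii', 'gss'])
save_step.set_save_to_cm_1(False)
save_step.execute(reducer, 'irs12345_graphite002_red')
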
-
-class Naming(ReductionStep):
-    """Takes the responsibility of naming the results away from the Grouping
-    step so that ws names are consistent right up until the last step. This
-    uses the following instrument parameters:
-    * 'Workflow.NamingConvention' - to decide how to name the result workspace.
-      The default (when nothing is selected) is to use the run title.
-    """
-    _result_workspaces = []
-
-    def __init__(self):
-        super(Naming, self).__init__()
-        self._result_workspaces = []
-        self._multi_run = False
-
-    def execute(self, reducer, file_ws):
-        self._multi_run = reducer._sum
-        wsname = self._get_ws_name(file_ws)
-        RenameWorkspace(InputWorkspace=file_ws, OutputWorkspace=wsname)
-        self._result_workspaces.append(wsname)
-
-    def get_result_workspaces(self):
-        return self._result_workspaces
-
-    def get_ws_name(self, workspace, reducer):
-        self._multi_run = reducer._sum
-        return self._get_ws_name(workspace)
-
-    def _get_ws_name(self, workspace):
-        try:
-            convention = mtd[workspace].getInstrument().getStringParameter(
-                'Workflow.NamingConvention')[0]
-        except IndexError:
-            convention = 'RunTitle'
-
-        if convention == 'AnalyserReflection':
-            return self._analyser_reflection(workspace)
-        elif convention == 'RunTitle':
-            return self._run_title(workspace)
-        else:
-            raise NotImplementedError('Unknown \'Workflow.NamingConvention\''
-                                      ' parameter encountered on workspace: ' + workspace)
-
-    def _run_title(self, workspace):
-        ws = mtd[workspace]
-        title = ws.getRun()['run_title'].value.strip()
-        runNo = ws.getRun()['run_number'].value
-        if self._multi_run:
-            runNo += '_multi'
-        inst = ws.getInstrument().getName()
-        isn = config.getFacility().instrument(inst).shortName().upper()
-        valid = "-_.() %s%s" % (string.ascii_letters, string.digits)
-        title = ''.join(ch for ch in title if ch in valid)
-        title = isn + runNo + '-' + title
-        return title
-
-    def _analyser_reflection(self, workspace):
-        if workspace == '':
-            return ''
-        ws = mtd[workspace]
-        inst = ws.getInstrument().getName()
-
-        short_name = ''
-        try:
-            short_name = config.getFacility().instrument(inst).shortName().lower()
-        except RuntimeError:
-            for facility in config.getFacilities():
-                try:
-                    short_name = facility.instrument(inst).shortName().lower()
-                except RuntimeError:
-                    pass
-
-        if short_name == '':
-            raise RuntimeError('Cannot find instrument "%s" in any facility' % str(inst))
-
-        run = ws.getRun().getLogData('run_number').value
-        if self._multi_run:
-            run += '_multi'
-        try:
-            analyser = ws.getInstrument().getStringParameter('analyser')[0]
-            reflection = ws.getInstrument().getStringParameter('reflection')[0]
-        except IndexError:
-            analyser = ''
-            reflection = ''
-        prefix = short_name + run + '_' + analyser + reflection + '_red'
-        return prefix
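
The run-title convention keeps only a whitelist of filename-safe characters before joining the instrument short name, run number, and title. A standalone sketch of the filtering; the title and prefix below are made up:

import string

# Sketch: the whitelist filter used when building run-title names.
valid = "-_.() %s%s" % (string.ascii_letters, string.digits)
title = "quartz 300K / cycle 12?"     # illustrative run title
clean = ''.join(ch for ch in title if ch in valid)
print("IRS12345-" + clean)            # instrument/run prefix is illustrative
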
diff --git a/Code/Mantid/scripts/Inelastic/msg_reducer.py b/Code/Mantid/scripts/Inelastic/msg_reducer.py
deleted file mode 100644
index fe8a132e8758..000000000000
--- a/Code/Mantid/scripts/Inelastic/msg_reducer.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# msg_reducer.py
-# Reducers for use by ISIS Molecular Spectroscopy Group
-import os.path
-
-from mantid.simpleapi import *
-from mantid.kernel import config, logger
-import reduction.reducer as reducer
-import inelastic_indirect_reduction_steps as steps
-
-class MSGReducer(reducer.Reducer):
-    """This is the base class for the reducer classes to be used by the ISIS
-    Molecular Spectroscopy Group (MSG). It exists to serve the functions that
-    are common to both spectroscopy and diffraction workflows in the hope of
-    providing a semi-consistent interface to both.
-    """
-
-    _instrument_name = None #: Name of the instrument used in the experiment.
-    _sum = False #: Whether to sum input files or treat them sequentially.
-    _load_logs = False #: Whether to load the log file(s) associated with the raw file.
-    _multiple_frames = False
-    _detector_range = [-1, -1]
-    _masking_detectors = {}
-    _parameter_file = None
-    _rebin_string = None
-    _fold_multiple_frames = True
-    _save_formats = []
-    _info_table_props = None
-    _extra_load_opts = {}
-    _reduction_steps = None
-    _data_files = None
-
-    def __init__(self):
-        super(MSGReducer, self).__init__()
-
-    def pre_process(self):
-        self._reduction_steps = []
-
-        loadData = steps.LoadData()
-        loadData.set_ws_list(self._data_files)
-        loadData.set_sum(self._sum)
-        loadData.set_load_logs(self._load_logs)
-        loadData.set_detector_range(self._detector_range[0],
-                                    self._detector_range[1])
-        loadData.set_parameter_file(self._parameter_file)
-        loadData.set_extra_load_opts(self._extra_load_opts)
-        loadData.execute(self, None)
-
-        if loadData.contains_event_data and (self._rebin_string is None or self._rebin_string == ''):
-            logger.warning('Reductions of event data without rebinning may give bad data!')
-
-        self._multiple_frames = loadData.is_multiple_frames()
-
-        if self._info_table_props is not None:
-            wsNames = loadData.get_ws_list().keys()
-            wsNameList = ", ".join(wsNames)
-            propsList = ", ".join(self._info_table_props)
-            CreateLogPropertyTable(
-                OutputWorkspace="RunInfo",
-                InputWorkspaces=wsNameList,
-                LogPropertyNames=propsList,
-                GroupPolicy="First")
-
-        if self._sum:
-            self._data_files = loadData.get_ws_list()
-
-        self._setup_steps()
-
-    def set_detector_range(self, start, end):
-        """Sets the start and end detector points for the reduction process.
-        These numbers are to be the *workspace index*, not the spectrum number.
-        Example:
-            reducer.set_detector_range(2,52)
-        """
-        if (not isinstance(start, int)) or (not isinstance(end, int)):
-            raise TypeError("start and end must be integer values")
-        self._detector_range = [start, end]
-
-    def set_fold_multiple_frames(self, value):
-        """When this is set to False, the reducer will not run the FoldData
-        reduction step or any step which appears after it in the reduction
-        chain.
-        This will only affect data which would ordinarily have used this
-        function (ie TOSCA on multiple frames).
-        """
-        if not isinstance(value, bool):
-            raise TypeError("value must be of boolean type")
-        self._fold_multiple_frames = value
-
-    def set_instrument_name(self, instrument):
-        """Unlike the SANS reducers, we do not create a class to describe the
-        instruments. Instead, we load the instrument and parameter file and
-        query it for information.
-        Raises:
-        * ValueError if an instrument name is not provided.
-        * RuntimeError if the IDF could not be found or is invalid.
-        * RuntimeError if the workspace index of the monitor could not be
-          determined.
-        Example use:
-            reducer.set_instrument_name("IRIS")
-        """
-        if not isinstance(instrument, str):
-            raise ValueError("Instrument name must be given.")
-        self._instrument_name = instrument
-
-    def set_parameter_file(self, file_name):
-        """Sets the parameter file to be used in the reduction. The parameter
-        file will contain some settings that are used throughout the reduction
-        process.
-        Note: This is *not* the base parameter file, ie "IRIS_Parameters.xml"
-        but, rather, the additional parameter file.
-        """
-        self._parameter_file = file_name
-        for directory in config.getInstrumentDirectories():
-            if os.path.isfile(os.path.join(directory, file_name)):
-                self._parameter_file = os.path.join(directory, file_name)
-                return
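
Putting the setters together, a typical configuration of one of the concrete MSG reducers might look like the sketch below. The subclass name, run file, and parameter values are illustrative placeholders, not taken from this changeset:

# Sketch: hypothetical reducer configuration; IndirectReducer, the file
# name and the rebin parameters are made up for illustration.
reducer = IndirectReducer()
reducer.set_instrument_name("IRIS")
reducer.set_detector_range(2, 52)          # workspace indices, not spectra
reducer.set_rebin_string("-0.5,0.005,0.5")
reducer.set_sum_files(False)
reducer.append_data_file("IRS12345.raw")
reducer.reduce()
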
-
-    def set_rebin_string(self, rebin):
-        """Sets the rebin string to be used with the Rebin algorithm.
-        """
-        if not isinstance(rebin, str):
-            raise TypeError("rebin variable must be of string type")
-        self._rebin_string = rebin
-
-    def set_sum_files(self, value):
-        """Mark whether multiple runs should be summed together for the process
-        or treated individually.
-        The default value for this is False.
-        """
-        if not isinstance(value, bool):
-            raise TypeError("value must be either True or False (boolean)")
-        self._sum = value
-
-    def set_load_logs(self, value):
-        """Mark whether the log file(s) associated with a raw file should be
-        loaded along with the raw file.
-        The default value for this is False.
-        """
-        if not isinstance(value, bool):
-            raise TypeError("value must be either True or False (boolean)")
-        self._load_logs = value
-
-    def set_save_formats(self, formats):
-        """Selects the save formats in which to export the reduced data.
-        formats should be a list object of strings containing the file
-        extensions that signify the types.
-        For example:
-            reducer.set_save_formats(['nxs', 'spe'])
-        tells the reducer to save the final result as a NeXus file and as an
-        SPE file.
-        Please see the documentation for the SaveItem reduction step for more
-        details.
-        """
-        if not isinstance(formats, list):
-            raise TypeError("formats variable must be of list type")
-        self._save_formats = formats
-
-    def append_load_option(self, name, value):
-        """Adds an additional option for the Load call; requires the name and
-        value of the property.
-        """
-        self._extra_load_opts[name] = value
-
-    def get_result_workspaces(self):
-        """Returns a Python list object containing the names of the workspaces
-        processed at the last reduction step. Using this, you can incorporate
-        the reducer into your own scripts.
-        It will only be effective after the reduce() function has run.
-        Example:
-            wslist = reducer.get_result_workspaces()
-            plotSpectrum(wslist, 0) # Plot the first spectrum of each of
-                                    # the result workspaces
-        """
-        for step in reversed(self._reduction_steps):
-            try:
-                return step.get_result_workspaces()
-            except AttributeError:
-                pass
-        raise RuntimeError("None of the reduction steps implement "
-                           "the get_result_workspaces() method.")
-
-    def _get_monitor_index(self, workspace):
-        """Determine the workspace index of the first monitor spectrum.
-        """
-        inst = workspace.getInstrument()
-        try:
-            monitor_index = inst.getNumberParameter('Workflow.Monitor1-SpectrumNumber')[0]
-            return int(monitor_index)
-        except IndexError:
-            raise ValueError('Unable to retrieve spectrum number of monitor.')
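
The result lookup above simply walks the step list backwards and returns results from the first step that exposes get_result_workspaces(), raising only if none do. A minimal standalone illustration of the same duck-typed reverse search, using toy classes rather than Mantid ones:

# Sketch: reverse, duck-typed search for the last step with results.
class StepA(object):
    pass

class StepB(object):
    def get_result_workspaces(self):
        return ['ws1', 'ws2']

chain = [StepA(), StepB(), StepA()]

result = None
for step in reversed(chain):
    try:
        result = step.get_result_workspaces()
        break
    except AttributeError:
        pass
print(result)  # ['ws1', 'ws2']
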