diff --git a/Code/Mantid/scripts/Inelastic/IndirectAbsCor.py b/Code/Mantid/scripts/Inelastic/IndirectAbsCor.py index 34388afef2b6..7b929f876663 100644 --- a/Code/Mantid/scripts/Inelastic/IndirectAbsCor.py +++ b/Code/Mantid/scripts/Inelastic/IndirectAbsCor.py @@ -1,9 +1,11 @@ # IDA F2PY Absorption Corrections Wrapper ## Handle selection of .pyd files for absorption corrections -import platform, sys + +import sys from IndirectImport import * + if is_supported_f2py_platform(): - cylabs = import_f2py("cylabs") + CYLABS = import_f2py("cylabs") else: unsupported_message() @@ -11,109 +13,113 @@ from mantid.simpleapi import * from mantid import config, logger, mtd import math, os.path, numpy as np -mp = import_mantidplot() - -def WaveRange(inWS, efixed): -# create a list of 10 equi-spaced wavelengths spanning the input data - oWS = '__WaveRange' - ExtractSingleSpectrum(InputWorkspace=inWS, OutputWorkspace=oWS, WorkspaceIndex=0) - ConvertUnits(InputWorkspace=oWS, OutputWorkspace=oWS, Target='Wavelength', - EMode='Indirect', EFixed=efixed) - Xin = mtd[oWS].readX(0) - xmin = mtd[oWS].readX(0)[0] - xmax = mtd[oWS].readX(0)[len(Xin)-1] + +MTD_PLOT = import_mantidplot() + +PICONV = math.pi / 180.0 + + +def WaveRange(in_ws, e_fixed): + # create a list of 10 equi-spaced wavelengths spanning the input data + out_ws = '__WaveRange' + ExtractSingleSpectrum(InputWorkspace=in_ws, OutputWorkspace=out_ws, WorkspaceIndex=0) + ConvertUnits(InputWorkspace=out_ws, OutputWorkspace=out_ws, Target='Wavelength', + EMode='Indirect', EFixed=e_fixed) + x_in = mtd[out_ws].readX(0) + x_min = mtd[out_ws].readX(0)[0] + x_max = mtd[out_ws].readX(0)[len(x_in)-1] ebin = 0.5 - nw1 = int(xmin/ebin) - nw2 = int(xmax/ebin)+1 - w1 = nw1*ebin - w2 = nw2*ebin + nw1 = int(x_min/ebin) + nw2 = int(x_max/ebin) + 1 + w1 = nw1 * ebin + w2 = nw2 * ebin wave = [] nw = 10 ebin = (w2-w1)/(nw-1) - for l in range(0,nw): + for l in range(0, nw): wave.append(w1+l*ebin) - DeleteWorkspace(oWS) + DeleteWorkspace(out_ws) return wave -def CheckSize(size,geom,ncan,Verbose): +def CheckSize(size, geom, num_can, verbose): if geom == 'cyl': if (size[1] - size[0]) < 1e-4: - error = 'Sample outer radius not > inner radius' + error = 'Sample outer radius not > inner radius' logger.notice('ERROR *** '+error) sys.exit(error) else: - if Verbose: + if verbose: message = 'Sam : inner radius = '+str(size[0])+' ; outer radius = '+str(size[1]) logger.notice(message) if geom == 'flt': if size[0] < 1e-4: - error = 'Sample thickness is zero' + error = 'Sample thickness is zero' logger.notice('ERROR *** '+error) sys.exit(error) else: - if Verbose: + if verbose: logger.notice('Sam : thickness = '+str(size[0])) - if ncan == 2: + if num_can == 2: if geom == 'cyl': if (size[2] - size[1]) < 1e-4: - error = 'Can inner radius not > sample outer radius' + error = 'Can inner radius not > sample outer radius' logger.notice('ERROR *** '+error) sys.exit(error) else: - if Verbose: + if verbose: message = 'Can : inner radius = '+str(size[1])+' ; outer radius = '+str(size[2]) logger.notice(message) if geom == 'flt': if size[1] < 1e-4: - error = 'Can thickness is zero' + error = 'Can thickness is zero' logger.notice('ERROR *** '+error) sys.exit(error) else: - if Verbose: + if verbose: logger.notice('Can : thickness = '+str(size[1])) -def CheckDensity(density,ncan): +def CheckDensity(density, num_can): if density[0] < 1e-5: - error = 'Sample density is zero' + error = 'Sample density is zero' logger.notice('ERROR *** '+error) sys.exit(error) - if ncan == 2: + if num_can == 2: if density[1] < 
1e-5: - error = 'Can density is zero' + error = 'Can density is zero' logger.notice('ERROR *** '+error) sys.exit(error) -def AbsRun(inputWS, geom, beam, ncan, size, density, sigs, siga, avar, Verbose, Save): +def AbsRun(inputWS, geom, beam, num_can, size, density, sigs, siga, avar, verbose, save): workdir = getDefaultWorkingDirectory() - if Verbose: + if verbose: logger.notice('Sample run : '+inputWS) # check that there is data - Xin = mtd[inputWS].readX(0) - if len(Xin) == 0: - error = 'Sample file has no data' + x_in = mtd[inputWS].readX(0) + if len(x_in) == 0: + error = 'Sample file has no data' logger.notice('ERROR *** '+error) sys.exit(error) - CheckSize(size,geom,ncan,Verbose) - CheckDensity(density,ncan) + CheckSize(size, geom, num_can, verbose) + CheckDensity(density, num_can) det = GetWSangles(inputWS) ndet = len(det) efixed = getEfixed(inputWS) - wavelas = math.sqrt(81.787/efixed) # elastic wavelength - waves = WaveRange(inputWS, efixed) # get wavelengths + wavelas = math.sqrt(81.787/efixed) # elastic wavelength + waves = WaveRange(inputWS, efixed) # get wavelengths nw = len(waves) run_name = getWSprefix(inputWS) - - if Verbose: + + if verbose: message = 'Sam : sigt = '+str(sigs[0])+' ; siga = '+str(siga[0])+' ; rho = '+str(density[0]) logger.notice(message) - if ncan == 2: + if num_can == 2: message = 'Can : sigt = '+str(sigs[1])+' ; siga = '+str(siga[1])+' ; rho = '+str(density[1]) logger.notice(message) @@ -124,112 +130,113 @@ def AbsRun(inputWS, geom, beam, ncan, size, density, sigs, siga, avar, Verbose, message = 'Detector angles : '+str(ndet)+' from '+str(det[0])+' to '+str(det[ndet-1]) logger.notice(message) - + name = run_name + geom wrk = workdir + run_name - wrk.ljust(120,' ') - - dataA1 = [] - dataA2 = [] - dataA3 = [] - dataA4 = [] + wrk.ljust(120, ' ') + + data_a1 = [] + data_a2 = [] + data_a3 = [] + data_a4 = [] - #initially set errors to zero - eZero = np.zeros(nw) + # initially set errors to zero + e_zero = np.zeros(nw) for n in range(ndet): - #geometry is flat + # geometry is flat if geom == 'flt': angles = [avar, det[n]] - (A1,A2,A3,A4) = FlatAbs(ncan, size, density, sigs, siga, angles, waves) + (a1, a2, a3, a4) = FlatAbs(num_can, size, density, sigs, siga, angles, waves) kill = 0 #geometry is a cylinder elif geom == 'cyl': astep = avar if (astep) < 1e-5: - error = 'Step size is zero' + error = 'Step size is zero' logger.notice('ERROR *** '+error) sys.exit(error) - + nstep = int((size[1] - size[0])/astep) if nstep < 20: - error = 'Number of steps ( '+str(nstep)+' ) should be >= 20' + error = 'Number of steps ( '+str(nstep)+' ) should be >= 20' logger.notice('ERROR *** '+error) sys.exit(error) angle = det[n] - kill, A1, A2, A3, A4 = cylabs.cylabs(astep, beam, ncan, size, + kill, a1, a2, a3, a4 = CYLABS.cylabs(astep, beam, num_can, size, density, sigs, siga, angle, wavelas, waves, n, wrk, 0) if kill == 0: - if Verbose: + if verbose: logger.notice('Detector '+str(n)+' at angle : '+str(det[n])+' * successful') - dataA1 = np.append(dataA1,A1) - dataA2 = np.append(dataA2,A2) - dataA3 = np.append(dataA3,A3) - dataA4 = np.append(dataA4,A4) + data_a1 = np.append(data_a1, a1) + data_a2 = np.append(data_a2, a2) + data_a3 = np.append(data_a3, a3) + data_a4 = np.append(data_a4, a4) else: error = 'Detector '+str(n)+' at angle : '+str(det[n])+' *** failed : Error code '+str(kill) logger.notice('ERROR *** '+error) sys.exit(error) - dataX = waves * ndet - qAxis = createQaxis(inputWS) + data_x = waves * ndet + q_axis = createQaxis(inputWS) # Create the output workspaces - assWS = 
name + '_ass'
-    asscWS = name + '_assc'
-    acscWS = name + '_acsc'
-    accWS = name + '_acc'
+    ass_ws = name + '_ass'
+    assc_ws = name + '_assc'
+    acsc_ws = name + '_acsc'
+    acc_ws = name + '_acc'
     fname = name +'_Abs'
-    CreateWorkspace(OutputWorkspace=assWS, DataX=dataX, DataY=dataA1,
+    CreateWorkspace(OutputWorkspace=ass_ws, DataX=data_x, DataY=data_a1,
                     NSpec=ndet, UnitX='Wavelength',
-                    VerticalAxisUnit='MomentumTransfer', VerticalAxisValues=qAxis)
+                    VerticalAxisUnit='MomentumTransfer', VerticalAxisValues=q_axis)
-    CreateWorkspace(OutputWorkspace=asscWS, DataX=dataX, DataY=dataA2,
+    CreateWorkspace(OutputWorkspace=assc_ws, DataX=data_x, DataY=data_a2,
                     NSpec=ndet, UnitX='Wavelength',
-                    VerticalAxisUnit='MomentumTransfer', VerticalAxisValues=qAxis)
+                    VerticalAxisUnit='MomentumTransfer', VerticalAxisValues=q_axis)
-    CreateWorkspace(OutputWorkspace=acscWS, DataX=dataX, DataY=dataA3,
+    CreateWorkspace(OutputWorkspace=acsc_ws, DataX=data_x, DataY=data_a3,
                     NSpec=ndet, UnitX='Wavelength',
-                    VerticalAxisUnit='MomentumTransfer', VerticalAxisValues=qAxis)
+                    VerticalAxisUnit='MomentumTransfer', VerticalAxisValues=q_axis)
-    CreateWorkspace(OutputWorkspace=accWS, DataX=dataX, DataY=dataA4,
+    CreateWorkspace(OutputWorkspace=acc_ws, DataX=data_x, DataY=data_a4,
                     NSpec=ndet, UnitX='Wavelength',
-                    VerticalAxisUnit='MomentumTransfer', VerticalAxisValues=qAxis)
+                    VerticalAxisUnit='MomentumTransfer', VerticalAxisValues=q_axis)
-    group = assWS +','+ asscWS +','+ acscWS +','+ accWS
-    GroupWorkspaces(InputWorkspaces=group,OutputWorkspace=fname)
+    group = ass_ws + ',' + assc_ws + ',' + acsc_ws + ',' + acc_ws
+    GroupWorkspaces(InputWorkspaces=group, OutputWorkspace=fname)
     # save output to file if required
-    if Save:
-        opath = os.path.join(workdir,fname+'.nxs')
+    if save:
+        opath = os.path.join(workdir, fname+'.nxs')
         SaveNexusProcessed(InputWorkspace=fname, Filename=opath)
-        if Verbose:
+        if verbose:
             logger.notice('Output file created : '+opath)
-    if ncan > 1:
-        return [assWS, asscWS, acscWS, accWS]
+    if num_can > 1:
+        return [ass_ws, assc_ws, acsc_ws, acc_ws]
     else:
-        return [assWS]
+        return [ass_ws]
-def plotAbs(workspaces, plotOpt):
-    if ( plotOpt == 'None' ): return
+def plotAbs(workspaces, plot_opt):
+    if plot_opt == 'None':
+        return
-    if ( plotOpt == 'Wavelength' or plotOpt == 'Both' ):
-        graph = mp.plotSpectrum(workspaces, 0)
+    if plot_opt == 'Wavelength' or plot_opt == 'Both':
+        graph = MTD_PLOT.plotSpectrum(workspaces, 0)
-    if ( plotOpt == 'Angle' or plotOpt == 'Both' ):
-        graph = mp.plotTimeBin(workspaces, 0)
-        graph.activeLayer().setAxisTitle(mp.Layer.Bottom, 'Angle')
+    if plot_opt == 'Angle' or plot_opt == 'Both':
+        graph = MTD_PLOT.plotTimeBin(workspaces, 0)
+        graph.activeLayer().setAxisTitle(MTD_PLOT.Layer.Bottom, 'Angle')
-def AbsRunFeeder(inputWS, canWS, geom, ncan, size, avar, density, beam_width=None, sampleFormula=None, canFormula=None, sigs=None, siga=None,
-                 plotOpt='None', Verbose=False,Save=False):
+def AbsRunFeeder(input_ws, can_ws, geom, ncan, size, avar, density, beam_width=None, sample_formula=None, can_formula=None, sigs=None, siga=None,
+                 plotOpt='None', verbose=False, save=False):
     """
     Handles the feeding of input and plotting of output for the F2PY absorption correction routine.
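
As a point of reference for the renamed AbsRunFeeder entry point above, a minimal call sketch follows. The workspace names, thicknesses, densities and chemical formulae are illustrative placeholders rather than values taken from this patch; sigs and siga are passed as zero-filled lists because the routine overwrites their entries when formulae are supplied, and beam_width is omitted so it is read from the instrument parameters.

    # Hypothetical sample and container workspaces, assumed to be loaded already.
    sample_ws = 'irs26176_graphite002_red'
    container_ws = 'irs26173_graphite002_red'

    AbsRunFeeder(sample_ws, container_ws,
                 geom='flt',                  # flat plate; use 'cyl' for cylinders
                 ncan=2,                      # sample + container corrections
                 size=[0.1, 0.1, 0.1],        # sample / can-front / can-back thicknesses
                 avar=45.0,                   # can angle in degrees (flat-plate geometry)
                 density=[0.1, 0.1, 0.1],     # sample and container number densities
                 sample_formula='H2 O',       # placeholder formulae
                 can_formula='V',
                 sigs=[0.0, 0.0, 0.0],        # overwritten from the material definitions
                 siga=[0.0, 0.0, 0.0],
                 plotOpt='None', verbose=True, save=False)
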
@@ -253,14 +260,14 @@ def AbsRunFeeder(inputWS, canWS, geom, ncan, size, avar, density, beam_width=Non StartTime('CalculateCorrections') CheckDensity(density, ncan) - #attempt to find beam width if none given + # attempt to find beam width if none given if beam_width is None: - beam_width = getInstrumentParameter(inputWS, 'Workflow.beam-width') + beam_width = getInstrumentParameter(input_ws, 'Workflow.beam-width') beam_width = float(beam_width) - #attempt to find beam height from parameter file + # attempt to find beam height from parameter file try: - beam_height = getInstrumentParameter(inputWS, 'Workflow.beam-height') + beam_height = getInstrumentParameter(input_ws, 'Workflow.beam-height') beam_height = float(beam_height) except ValueError: # fall back on default value for beam height @@ -273,14 +280,14 @@ def AbsRunFeeder(inputWS, canWS, geom, ncan, size, avar, density, beam_width=Non # beam[7:8] hsdown,hsup bottom and top of scattered beam from sample b. beam = [beam_height, 0.5 * beam_width, -0.5 * beam_width, (beam_width / 2), -(beam_width / 2), 0.0, beam_height, 0.0, beam_height] - if sampleFormula is None and (sigs is None or siga is None): + if sample_formula is None and (sigs is None or siga is None): raise ValueError("Either a formula for the sample or values for the cross sections must be supplied.") #set sample material based on input or formula - if sampleFormula is not None: - SetSampleMaterial(InputWorkspace=inputWS, ChemicalFormula=sampleFormula, SampleNumberDensity=density[0]) + if sample_formula is not None: + SetSampleMaterial(InputWorkspace=input_ws, ChemicalFormula=sample_formula, SampleNumberDensity=density[0]) - sample = mtd[inputWS].sample() + sample = mtd[input_ws].sample() sam_mat = sample.getMaterial() # total scattering x-section @@ -288,11 +295,11 @@ def AbsRunFeeder(inputWS, canWS, geom, ncan, size, avar, density, beam_width=Non # absorption x-section siga[0] = sam_mat.absorbXSection() - if canFormula is not None and ncan == 2: + if can_formula is not None and ncan == 2: #set can material based on input or formula - SetSampleMaterial(InputWorkspace=canWS, ChemicalFormula=canFormula, SampleNumberDensity=density[1]) + SetSampleMaterial(InputWorkspace=can_ws, ChemicalFormula=can_formula, SampleNumberDensity=density[1]) - can_sample = mtd[canWS].sample() + can_sample = mtd[can_ws].sample() can_mat = can_sample.getMaterial() # total scattering x-section for can @@ -302,20 +309,19 @@ def AbsRunFeeder(inputWS, canWS, geom, ncan, size, avar, density, beam_width=Non siga[1] = can_mat.absorbXSection() siga[2] = can_mat.absorbXSection() - workspaces = AbsRun(inputWS, geom, beam, ncan, size, density, - sigs, siga, avar, Verbose, Save) + workspaces = AbsRun(input_ws, geom, beam, ncan, size, density, + sigs, siga, avar, verbose, save) EndTime('CalculateCorrections') plotAbs(workspaces, plotOpt) - def FlatAbs(ncan, thick, density, sigs, siga, angles, waves): - """ + """ FlatAbs - calculate flat plate absorption factors - + For more information See: - - MODES User Guide: http://www.isis.stfc.ac.uk/instruments/iris/data-analysis/modes-v3-user-guide-6962.pdf - - C J Carlile, Rutherford Laboratory report, RL-74-103 (1974) + - MODES User Guide: http://www.isis.stfc.ac.uk/instruments/iris/data-analysis/modes-v3-user-guide-6962.pdf + - C J Carlile, Rutherford Laboratory report, RL-74-103 (1974) @param sigs - list of scattering cross-sections @param siga - list of absorption cross-sections @@ -325,12 +331,11 @@ def FlatAbs(ncan, thick, density, sigs, siga, angles, waves): @param 
angles - list of angles @param waves - list of wavelengths """ - PICONV = math.pi/180. #can angle and detector angle tcan1, theta1 = angles - canAngle = tcan1*PICONV - theta = theta1*PICONV + can_angle = tcan1 * PICONV + theta = theta1 * PICONV # tsec is the angle the scattered beam makes with the normal to the sample surface. tsec = theta1-tcan1 @@ -343,68 +348,68 @@ def FlatAbs(ncan, thick, density, sigs, siga, angles, waves): acc = np.ones(nlam) # case where tsec is close to 90 degrees. CALCULATION IS UNRELIABLE - if (abs(abs(tsec)-90.0) < 1.0): + if abs(abs(tsec)-90.0) < 1.0: #default to 1 for everything return ass, assc, acsc, acc else: #sample & can scattering x-section - sampleScatt, canScatt = sigs[:2] + sample_scatt, can_scatt = sigs[:2] #sample & can absorption x-section - sampleAbs, canAbs = siga[:2] - #sample & can density - sampleDensity, canDensity = density[:2] + sample_abs, can_abs = siga[:2] + #sample & can density + sample_density, can_density = density[:2] #thickness of the sample and can - samThickness, canThickness1, canThickness2 = thick - - tsec = tsec*PICONV + sam_thickness, can_thickness1, can_thickness2 = thick + + tsec = tsec * PICONV - sec1 = 1./math.cos(canAngle) + sec1 = 1./math.cos(can_angle) sec2 = 1./math.cos(tsec) #list of wavelengths waves = np.array(waves) #sample cross section - sampleXSection = (sampleScatt + sampleAbs * waves /1.8) * sampleDensity + sample_x_section = (sample_scatt + sample_abs * waves /1.8) * sample_density #vector version of fact - vecFact = np.vectorize(Fact) - fs = vecFact(sampleXSection, samThickness, sec1, sec2) + vec_fact = np.vectorize(Fact) + fs = vec_fact(sample_x_section, sam_thickness, sec1, sec2) - sampleSec1, sampleSec2 = calcThicknessAtSec(sampleXSection, samThickness, [sec1, sec2]) + sample_sec1, sample_sec2 = calcThicknessAtSec(sample_x_section, sam_thickness, [sec1, sec2]) - if (sec2 < 0.): - ass = fs / samThickness + if sec2 < 0.0: + ass = fs / sam_thickness else: - ass= np.exp(-sampleSec2) * fs / samThickness + ass = np.exp(-sample_sec2) * fs / sam_thickness - useCan = (ncan > 1) - if useCan: + use_can = (ncan > 1) + if use_can: #calculate can cross section - canXSection = (canScatt + canAbs * waves /1.8) * canDensity - assc, acsc, acc = calcFlatAbsCan(ass, canXSection, canThickness1, canThickness2, sampleSec1, sampleSec2, [sec1, sec2]) + can_x_section = (can_scatt + can_abs * waves /1.8) * can_density + assc, acsc, acc = calcFlatAbsCan(ass, can_x_section, can_thickness1, can_thickness2, sample_sec1, sample_sec2, [sec1, sec2]) return ass, assc, acsc, acc -def Fact(xSection,thickness,sec1,sec2): - S = xSection*thickness*(sec1-sec2) - F = 1.0 - if (S == 0.): - F = thickness +def Fact(x_section, thickness, sec1, sec2): + s = x_section * thickness * (sec1 - sec2) + f = 1.0 + if s == 0.0: + f = thickness else: - S = (1-math.exp(-S))/S - F = thickness*S - return F + s = (1 - math.exp(-s)) / s + f = thickness * s + return f -def calcThicknessAtSec(xSection, thickness, sec): +def calcThicknessAtSec(x_section, thickness, sec): sec1, sec2 = sec - thickSec1 = xSection * thickness * sec1 - thickSec2 = xSection * thickness * sec2 + thick_sec1 = x_section * thickness * sec1 + thick_sec2 = x_section * thickness * sec2 - return thickSec1, thickSec2 + return thick_sec1, thick_sec2 -def calcFlatAbsCan(ass, canXSection, canThickness1, canThickness2, sampleSec1, sampleSec2, sec): +def calcFlatAbsCan(ass, can_x_section, can_thickness1, can_thickness2, sample_sec1, sample_sec2, sec): assc = np.ones(ass.size) acsc = np.ones(ass.size) acc 
= np.ones(ass.size) @@ -412,36 +417,36 @@ def calcFlatAbsCan(ass, canXSection, canThickness1, canThickness2, sampleSec1, s sec1, sec2 = sec #vector version of fact - vecFact = np.vectorize(Fact) - f1 = vecFact(canXSection,canThickness1,sec1,sec2) - f2 = vecFact(canXSection,canThickness2,sec1,sec2) + vec_fact = np.vectorize(Fact) + f1 = vec_fact(can_x_section, can_thickness1, sec1, sec2) + f2 = vec_fact(can_x_section, can_thickness2, sec1, sec2) - canThick1Sec1, canThick1Sec2 = calcThicknessAtSec(canXSection, canThickness1, sec) - canThick2Sec1, canThick2Sec2 = calcThicknessAtSec(canXSection, canThickness2, sec) + can_thick1_sec1, can_thick1_sec2 = calcThicknessAtSec(can_x_section, can_thickness1, sec) + can_thick2_sec1, can_thick2_sec2 = calcThicknessAtSec(can_x_section, can_thickness2, sec) - if (sec2 < 0.): - val = np.exp(-(canThick1Sec1-canThick1Sec2)) + if sec2 < 0.0: + val = np.exp(-(can_thick1_sec1 - can_thick1_sec2)) assc = ass * val acc1 = f1 acc2 = f2 * val acsc1 = acc1 - acsc2 = acc2 * np.exp(-(sampleSec1-sampleSec2)) + acsc2 = acc2 * np.exp(-(sample_sec1 - sample_sec2)) else: - val = np.exp(-(canThick1Sec1+canThick2Sec2)) + val = np.exp(-(can_thick1_sec1 + can_thick2_sec2)) assc = ass * val - acc1 = f1 * np.exp(-(canThick1Sec2+canThick2Sec2)) + acc1 = f1 * np.exp(-(can_thick1_sec2 + can_thick2_sec2)) acc2 = f2 * val - acsc1 = acc1 * np.exp(-sampleSec2) - acsc2 = acc2 * np.exp(-sampleSec1) + acsc1 = acc1 * np.exp(-sample_sec2) + acsc2 = acc2 * np.exp(-sample_sec1) - canThickness = canThickness1+canThickness2 + can_thickness = can_thickness1 + can_thickness2 - if(canThickness > 0.): - acc = (acc1+acc2)/canThickness - acsc = (acsc1+acsc2)/canThickness + if can_thickness > 0.0: + acc = (acc1 + acc2) / can_thickness + acsc = (acsc1 + acsc2) / can_thickness - return assc, acsc, acc \ No newline at end of file + return assc, acsc, acc diff --git a/Code/Mantid/scripts/Inelastic/IndirectCommon.py b/Code/Mantid/scripts/Inelastic/IndirectCommon.py index 62a1b7f72282..39bf14535a5a 100644 --- a/Code/Mantid/scripts/Inelastic/IndirectCommon.py +++ b/Code/Mantid/scripts/Inelastic/IndirectCommon.py @@ -4,10 +4,11 @@ from IndirectImport import import_mantidplot -import sys, platform, os.path, math, datetime, re +import os.path, math, datetime, re import numpy as np import itertools + def StartTime(prog): logger.notice('----------') message = 'Program ' + prog +' started @ ' + str(datetime.datetime.now()) @@ -18,12 +19,12 @@ def EndTime(prog): logger.notice(message) logger.notice('----------') -def loadInst(instrument): - ws = '__empty_' + instrument - if not mtd.doesExist(ws): +def loadInst(instrument): + workspace = '__empty_' + instrument + if not mtd.doesExist(workspace): idf_dir = config['instrumentDefinition.directory'] idf = idf_dir + instrument + '_Definition.xml' - LoadEmptyInstrument(Filename=idf, OutputWorkspace=ws) + LoadEmptyInstrument(Filename=idf, OutputWorkspace=workspace) def loadNexus(filename): ''' @@ -31,7 +32,7 @@ def loadNexus(filename): filename. Convenience function for not having to play around with paths in every function. ''' - name = os.path.splitext( os.path.split(filename)[1] )[0] + name = os.path.splitext(os.path.split(filename)[1])[0] LoadNexus(Filename=filename, OutputWorkspace=name) return name @@ -40,10 +41,10 @@ def getInstrRun(ws_name): Get the instrument name and run number from a workspace. 
@param ws_name - name of the workspace - @return tuple of form (instrument, run number) + @return tuple of form (instrument, run number) ''' - ws = mtd[ws_name] - run_number = str(ws.getRunNumber()) + workspace = mtd[ws_name] + run_number = str(workspace.getRunNumber()) if run_number == '0': #attempt to parse run number off of name match = re.match(r'([a-zA-Z]+)([0-9]+)', ws_name) @@ -52,7 +53,7 @@ def getInstrRun(ws_name): else: raise RuntimeError("Could not find run number associated with workspace.") - instrument = ws.getInstrument().getName() + instrument = workspace.getInstrument().getName() facility = config.getFacility() instrument = facility.instrument(instrument).filePrefix(int(run_number)) instrument = instrument.lower() @@ -67,10 +68,10 @@ def getWSprefix(wsname): if wsname == '': return '' - ws = mtd[wsname] + workspace = mtd[wsname] facility = config['default.facility'] - ws_run = ws.getRun() + ws_run = workspace.getRun() if 'facility' in ws_run: facility = ws_run.getLogData('facility').value @@ -81,8 +82,8 @@ def getWSprefix(wsname): run_name = instrument + run_number try: - analyser = ws.getInstrument().getStringParameter('analyser')[0] - reflection = ws.getInstrument().getStringParameter('reflection')[0] + analyser = workspace.getInstrument().getStringParameter('analyser')[0] + reflection = workspace.getInstrument().getStringParameter('reflection')[0] except IndexError: analyser = '' reflection = '' @@ -91,26 +92,26 @@ def getWSprefix(wsname): if len(analyser + reflection) > 0: prefix += '_' - + return prefix -def getEfixed(workspace, detIndex=0): +def getEfixed(workspace, det_index=0): inst = mtd[workspace].getInstrument() return inst.getNumberParameter("efixed-val")[0] def checkUnitIs(ws, unit_id, axis_index=0): - """ - Check that the workspace has the correct units by comparing + """ + Check that the workspace has the correct units by comparing against the UnitID. """ axis = mtd[ws].getAxis(axis_index) unit = axis.getUnit() - return (unit.unitID() == unit_id) + return unit.unitID() == unit_id # Get the default save directory and check it's valid def getDefaultWorkingDirectory(): workdir = config['defaultsave.directory'] - + if not os.path.isdir(workdir): raise IOError("Default save directory is not a valid path!") @@ -118,54 +119,53 @@ def getDefaultWorkingDirectory(): def createQaxis(inputWS): result = [] - ws = mtd[inputWS] - nHist = ws.getNumberHistograms() - if ws.getAxis(1).isSpectra(): - inst = ws.getInstrument() - samplePos = inst.getSample().getPos() - beamPos = samplePos - inst.getSource().getPos() - for i in range(0,nHist): + workspace = mtd[inputWS] + n_hist = workspace.getNumberHistograms() + if workspace.getAxis(1).isSpectra(): + inst = workspace.getInstrument() + sample_pos = inst.getSample().getPos() + beam_pos = sample_pos - inst.getSource().getPos() + for i in range(0, n_hist): efixed = getEfixed(inputWS, i) - detector = ws.getDetector(i) - theta = detector.getTwoTheta(samplePos, beamPos) / 2 + detector = workspace.getDetector(i) + theta = detector.getTwoTheta(sample_pos, beam_pos) / 2 lamda = math.sqrt(81.787/efixed) - q = 4 * math.pi * math.sin(theta) / lamda - result.append(q) + q_value = 4 * math.pi * math.sin(theta) / lamda + result.append(q_value) else: - axis = ws.getAxis(1) + axis = workspace.getAxis(1) msg = 'Creating Axis based on Detector Q value: ' if not axis.isNumeric(): msg += 'Input workspace must have either spectra or numeric axis.' 
raise ValueError(msg) - if ( axis.getUnit().unitID() != 'MomentumTransfer' ): + if axis.getUnit().unitID() != 'MomentumTransfer': msg += 'Input must have axis values of Q' raise ValueError(msg) - for i in range(0, nHist): + for i in range(0, n_hist): result.append(float(axis.label(i))) return result -def GetWSangles(inWS): - nhist = mtd[inWS].getNumberHistograms() # get no. of histograms/groups - sourcePos = mtd[inWS].getInstrument().getSource().getPos() - samplePos = mtd[inWS].getInstrument().getSample().getPos() - beamPos = samplePos - sourcePos - angles = [] # will be list of angles +def GetWSangles(in_ws): + nhist = mtd[in_ws].getNumberHistograms() # get no. of histograms/groups + source_pos = mtd[in_ws].getInstrument().getSource().getPos() + sample_pos = mtd[in_ws].getInstrument().getSample().getPos() + beam_pos = sample_pos - source_pos + angles = [] # will be list of angles for index in range(0, nhist): - detector = mtd[inWS].getDetector(index) # get index - twoTheta = detector.getTwoTheta(samplePos, beamPos)*180.0/math.pi # calc angle - angles.append(twoTheta) # add angle + detector = mtd[in_ws].getDetector(index) # get index + two_theta = detector.getTwoTheta(sample_pos, beam_pos)*180.0/math.pi # calc angle + angles.append(two_theta) # add angle return angles -def GetThetaQ(ws): - nhist = mtd[ws].getNumberHistograms() # get no. of histograms/groups - efixed = getEfixed(ws) - wavelas = math.sqrt(81.787/efixed) # elastic wavelength - k0 = 4.0*math.pi/wavelas +def GetThetaQ(workspace): + efixed = getEfixed(workspace) + wavelas = math.sqrt(81.787/efixed) # elastic wavelength + k0_val = 4.0*math.pi/wavelas - theta = np.array(GetWSangles(ws)) - Q = k0 * np.sin(0.5 * np.radians(theta)) + theta = np.array(GetWSangles(workspace)) + q_val = k0_val * np.sin(0.5 * np.radians(theta)) - return theta, Q + return theta, q_val def ExtractFloat(data_string): """ Extract float values from an ASCII string""" @@ -179,82 +179,80 @@ def ExtractInt(data_string): values = map(int, values) return values -def PadArray(inarray,nfixed): #pad a list to specified size - npt=len(inarray) +def PadArray(inarray, nfixed): # pad a list to specified size + npt = len(inarray) padding = nfixed-npt - outarray=[] + outarray = [] outarray.extend(inarray) - outarray +=[0]*padding + outarray += [0]*padding return outarray -def CheckAnalysers(in1WS,in2WS,Verbose): - ws1 = mtd[in1WS] - a1 = ws1.getInstrument().getStringParameter('analyser')[0] - r1 = ws1.getInstrument().getStringParameter('reflection')[0] - ws2 = mtd[in2WS] - a2 = ws2.getInstrument().getStringParameter('analyser')[0] - r2 = ws2.getInstrument().getStringParameter('reflection')[0] - if a1 != a2: - raise ValueError('Workspace '+in1WS+' and '+in2WS+' have different analysers') - elif r1 != r2: - raise ValueError('Workspace '+in1WS+' and '+in2WS+' have different reflections') +def CheckAnalysers(in1_ws, in2_ws, verbose): + ws1 = mtd[in1_ws] + analyser1 = ws1.getInstrument().getStringParameter('analyser')[0] + reflection1 = ws1.getInstrument().getStringParameter('reflection')[0] + ws2 = mtd[in2_ws] + analyser2 = ws2.getInstrument().getStringParameter('analyser')[0] + reflection2 = ws2.getInstrument().getStringParameter('reflection')[0] + if analyser1 != analyser2: + raise ValueError('Workspace '+in1_ws+' and '+in2_ws+' have different analysers') + elif reflection1 != reflection2: + raise ValueError('Workspace '+in1_ws+' and '+in2_ws+' have different reflections') else: - if Verbose: - logger.notice('Analyser is '+a1+r1) + if verbose: + logger.notice('Analyser is 
'+analyser1+reflection1) -def CheckHistZero(inWS): - nhist = mtd[inWS].getNumberHistograms() # no. of hist/groups in WS +def CheckHistZero(in_ws): + nhist = mtd[in_ws].getNumberHistograms() # no. of hist/groups in WS if nhist == 0: - raise ValueError('Workspace '+inWS+' has NO histograms') - Xin = mtd[inWS].readX(0) - ntc = len(Xin)-1 # no. points from length of x array + raise ValueError('Workspace '+in_ws+' has NO histograms') + x_in = mtd[in_ws].readX(0) + ntc = len(x_in) - 1 # no. points from length of x array if ntc == 0: - raise ValueError('Workspace '+inWS+' has NO points') - return nhist,ntc - -def CheckHistSame(in1WS,name1,in2WS,name2): - nhist1 = mtd[in1WS].getNumberHistograms() # no. of hist/groups in WS1 - X1 = mtd[in1WS].readX(0) - xlen1 = len(X1) - nhist2 = mtd[in2WS].getNumberHistograms() # no. of hist/groups in WS2 - X2 = mtd[in2WS].readX(0) - xlen2 = len(X2) - if nhist1 != nhist2: # check that no. groups are the same - e1 = name1+' ('+in1WS+') histograms (' +str(nhist1) + ')' - e2 = name2+' ('+in2WS+') histograms (' +str(nhist2) + ')' - error = e1 + ' not = ' + e2 - raise ValueError(error) - elif xlen1 != xlen2: - e1 = name1+' ('+in1WS+') array length (' +str(xlen1) + ')' - e2 = name2+' ('+in2WS+') array length (' +str(xlen2) + ')' - error = e1 + ' not = ' + e2 - raise ValueError(error) - -def CheckXrange(x_range,type): - if not ( ( len(x_range) == 2 ) or ( len(x_range) == 4 ) ): - raise ValueError(type + ' - Range must contain either 2 or 4 numbers') - + raise ValueError('Workspace '+in_ws+' has NO points') + return nhist, ntc + +def CheckHistSame(in1_ws, name1, in2_ws, name2): + num_hist1 = mtd[in1_ws].getNumberHistograms() # no. of hist/groups in WS1 + x_range1 = mtd[in1_ws].readX(0) + x_len1 = len(x_range1) + num_hist2 = mtd[in2_ws].getNumberHistograms() # no. of hist/groups in WS2 + x_range2 = mtd[in2_ws].readX(0) + x_len2 = len(x_range2) + if num_hist1 != num_hist2: # check that no. 
groups are the same + error_str = '{0} ({1}) histograms ({2}) not = {3}({4}) histograms ({5})'.format( + name1, in1_ws, num_hist1, name2, in2_ws, num_hist2) + raise ValueError(error_str) + elif x_len1 != x_len2: + error_str = '{0} ({1}) array length ({2}) not = {3}({4}) array length ({5})'.format( + name1, in1_ws, x_len1, name2, in2_ws, x_len2) + raise ValueError(error_str) + +def CheckXrange(x_range, range_type): + if not ((len(x_range) == 2) or (len(x_range) == 4)): + raise ValueError(range_type + ' - Range must contain either 2 or 4 numbers') + for lower, upper in zip(x_range[::2], x_range[1::2]): if math.fabs(lower) < 1e-5: - raise ValueError(type + ' - input minimum ('+str(lower)+') is Zero') + raise ValueError(range_type+' - input minimum ('+str(lower)+') is Zero') if math.fabs(upper) < 1e-5: - raise ValueError(type + ' - input maximum ('+str(upper)+') is Zero') + raise ValueError(range_type+' - input maximum ('+str(upper)+') is Zero') if upper < lower: - raise ValueError(type + ' - input max ('+str(upper)+') < min ('+str(lower)+')') - -def CheckElimits(erange,Xin): - nx = len(Xin)-1 - - if math.fabs(erange[0]) < 1e-5: - raise ValueError('Elimits - input emin ( '+str(erange[0])+' ) is Zero') - if erange[0] < Xin[0]: - raise ValueError('Elimits - input emin ( '+str(erange[0])+' ) < data emin ( '+str(Xin[0])+' )') - if math.fabs(erange[1]) < 1e-5: - raise ValueError('Elimits - input emax ( '+str(erange[1])+' ) is Zero') - if erange[1] > Xin[nx]: - raise ValueError('Elimits - input emax ( '+str(erange[1])+' ) > data emax ( '+str(Xin[nx])+' )') - if erange[1] < erange[0]: - raise ValueError('Elimits - input emax ( '+str(erange[1])+' ) < emin ( '+str(erange[0])+' )') + raise ValueError(range_type+' - input max ('+str(upper)+') < min ('+str(lower)+')') + +def CheckElimits(e_range, x_in): + x_len = len(x_in) - 1 + + if math.fabs(e_range[0]) < 1e-5: + raise ValueError('Elimits - input emin ( '+str(e_range[0])+' ) is Zero') + if e_range[0] < x_in[0]: + raise ValueError('Elimits - input emin ( '+str(e_range[0])+' ) < data emin ( '+str(x_in[0])+' )') + if math.fabs(e_range[1]) < 1e-5: + raise ValueError('Elimits - input emax ( '+str(e_range[1])+' ) is Zero') + if e_range[1] > x_in[x_len]: + raise ValueError('Elimits - input emax ( '+str(e_range[1])+' ) > data emax ( '+str(x_in[x_len])+' )') + if e_range[1] < e_range[0]: + raise ValueError('Elimits - input emax ( '+str(e_range[1])+' ) < emin ( '+str(e_range[0])+' )') def getInstrumentParameter(ws, param_name): """Get an named instrument parameter from a workspace. @@ -262,10 +260,11 @@ def getInstrumentParameter(ws, param_name): @param ws The workspace to get the instrument from. @param param_name The name of the parameter to look up. """ + inst = mtd[ws].getInstrument() - #create a map of type parameters to functions. This is so we avoid writing lots of - #if statements becuase there's no way to dynamically get the type. + # create a map of type parameters to functions. This is so we avoid writing lots of + # if statements becuase there's no way to dynamically get the type. 
func_map = {'double': inst.getNumberParameter, 'string': inst.getStringParameter, 'int': inst.getIntParameter, 'bool': inst.getBoolParameter} @@ -288,17 +287,18 @@ def plotSpectra(ws, y_axis_title, indicies=[]): @param y_axis_title - label for the y axis @param indicies - list of spectrum indicies to plot """ + if len(indicies) == 0: num_spectra = mtd[ws].getNumberHistograms() indicies = range(num_spectra) try: - mp = import_mantidplot() - plot = mp.plotSpectrum(ws, indicies, True) + mtd_plot = import_mantidplot() + plot = mtd_plot.plotSpectrum(ws, indicies, True) layer = plot.activeLayer() - layer.setAxisTitle(mp.Layer.Left, y_axis_title) + layer.setAxisTitle(mtd_plot.Layer.Left, y_axis_title) except RuntimeError: - #User clicked cancel on plot so don't do anything + # User clicked cancel on plot so don't do anything return def plotParameters(ws, *param_names): @@ -309,6 +309,7 @@ def plotParameters(ws, *param_names): @param ws - the workspace to plot from @param param_names - list of names to search for """ + axis = mtd[ws].getAxis(1) if axis.isText() and len(param_names) > 0: num_spectra = mtd[ws].getNumberHistograms() @@ -319,78 +320,78 @@ def plotParameters(ws, *param_names): plotSpectra(ws, name, indicies) def convertToElasticQ(input_ws, output_ws=None): - """ + """ Helper function to convert the spectrum axis of a sample to ElasticQ. @param input_ws - the name of the workspace to convert from @param output_ws - the name to call the converted workspace - """ - - if output_ws is None: - output_ws = input_ws - - axis = mtd[input_ws].getAxis(1) - if axis.isSpectra(): - e_fixed = getEfixed(input_ws) - ConvertSpectrumAxis(input_ws,Target='ElasticQ',EMode='Indirect',EFixed=e_fixed,OutputWorkspace=output_ws) - - elif axis.isNumeric(): - #check that units are Momentum Transfer - if axis.getUnit().unitID() != 'MomentumTransfer': - raise RuntimeError('Input must have axis values of Q') - - CloneWorkspace(input_ws, OutputWorkspace=output_ws) - else: - raise RuntimeError('Input workspace must have either spectra or numeric axis.') - + """ + + if output_ws is None: + output_ws = input_ws + + axis = mtd[input_ws].getAxis(1) + if axis.isSpectra(): + e_fixed = getEfixed(input_ws) + ConvertSpectrumAxis(input_ws, Target='ElasticQ', EMode='Indirect', EFixed=e_fixed, OutputWorkspace=output_ws) + + elif axis.isNumeric(): + # check that units are Momentum Transfer + if axis.getUnit().unitID() != 'MomentumTransfer': + raise RuntimeError('Input must have axis values of Q') + + CloneWorkspace(input_ws, OutputWorkspace=output_ws) + else: + raise RuntimeError('Input workspace must have either spectra or numeric axis.') + def transposeFitParametersTable(params_table, output_table=None): - """ + """ Transpose the parameter table created from a multi domain Fit. This function will make the output consistent with PlotPeakByLogValue. @param params_table - the parameter table output from Fit. - @param output_table - name to call the transposed table. 
If omitted, - the output_table will be the same as the params_table - """ - params_table = mtd[params_table] - - table_ws = '__tmp_table_ws' - table_ws = CreateEmptyTableWorkspace(OutputWorkspace=table_ws) - - param_names = params_table.column(0)[:-1] #-1 to remove cost function - param_values = params_table.column(1)[:-1] - param_errors = params_table.column(2)[:-1] - - #find the number of parameters per function - func_index = param_names[0].split('.')[0] - num_params = 0 - for i, name in enumerate(param_names): - if name.split('.')[0] != func_index: - num_params = i - break - - #create columns with parameter names for headers - column_names = ['.'.join(name.split('.')[1:]) for name in param_names[:num_params]] - column_error_names = [name + '_Err' for name in column_names] - column_names = zip(column_names, column_error_names) - table_ws.addColumn('double', 'axis-1') - for name, error_name in column_names: - table_ws.addColumn('double', name) - table_ws.addColumn('double', error_name) - - #output parameter values to table row - for i in xrange(0, params_table.rowCount()-1, num_params): - row_values = param_values[i:i+num_params] - row_errors = param_errors[i:i+num_params] - row = [value for pair in zip(row_values, row_errors) for value in pair] - row = [i/num_params] + row - table_ws.addRow(row) - - if output_table is None: - output_table = params_table.name() - - RenameWorkspace(table_ws.name(), OutputWorkspace=output_table) + @param output_table - name to call the transposed table. If omitted, + the output_table will be the same as the params_table + """ + params_table = mtd[params_table] + + table_ws = '__tmp_table_ws' + table_ws = CreateEmptyTableWorkspace(OutputWorkspace=table_ws) + + param_names = params_table.column(0)[:-1] #-1 to remove cost function + param_values = params_table.column(1)[:-1] + param_errors = params_table.column(2)[:-1] + + # find the number of parameters per function + func_index = param_names[0].split('.')[0] + num_params = 0 + for i, name in enumerate(param_names): + if name.split('.')[0] != func_index: + num_params = i + break + + # create columns with parameter names for headers + column_names = ['.'.join(name.split('.')[1:]) for name in param_names[:num_params]] + column_error_names = [name + '_Err' for name in column_names] + column_names = zip(column_names, column_error_names) + table_ws.addColumn('double', 'axis-1') + for name, error_name in column_names: + table_ws.addColumn('double', name) + table_ws.addColumn('double', error_name) + + # output parameter values to table row + for i in xrange(0, params_table.rowCount()-1, num_params): + row_values = param_values[i:i+num_params] + row_errors = param_errors[i:i+num_params] + row = [value for pair in zip(row_values, row_errors) for value in pair] + row = [i/num_params] + row + table_ws.addRow(row) + + if output_table is None: + output_table = params_table.name() + + RenameWorkspace(table_ws.name(), OutputWorkspace=output_table) def search_for_fit_params(suffix, table_ws): """ @@ -399,74 +400,79 @@ def search_for_fit_params(suffix, table_ws): @param suffix - the name of the parameter to find. @param table_ws - the name of the table workspace to search. """ + return [name for name in mtd[table_ws].getColumnNames() if name.endswith(suffix)] def convertParametersToWorkspace(params_table, x_column, param_names, output_name): - """ + """ Convert a parameter table output by PlotPeakByLogValue to a MatrixWorkspace. 
- This will make a spectrum for each parameter name using the x_column vairable as the + This will make a spectrum for each parameter name using the x_column vairable as the x values for the spectrum. @param params_table - the table workspace to convert to a MatrixWorkspace. @param x_column - the column in the table to use for the x values. @param parameter_names - list of parameter names to add to the workspace @param output_name - name to call the output workspace. - """ - #search for any parameters in the table with the given parameter names, - #ignoring their function index and output them to a workspace - workspace_names = [] - for param_name in param_names: - column_names = search_for_fit_params(param_name, params_table) - column_error_names = search_for_fit_params(param_name+'_Err', params_table) - param_workspaces = [] - for name, error_name in zip(column_names, column_error_names): - ConvertTableToMatrixWorkspace(params_table, x_column, name, error_name, OutputWorkspace=name) - param_workspaces.append(name) - workspace_names.append(param_workspaces) - - #transpose list of workspaces, ignoring unequal length of lists - #this handles the case where a parameter occurs only once in the whole workspace - workspace_names = map(list, itertools.izip_longest(*workspace_names)) - workspace_names = [filter(None, sublist) for sublist in workspace_names] - - #join all the parameters for each peak into a single workspace per peak - temp_workspaces = [] - for peak_params in workspace_names: - temp_peak_ws = peak_params[0] - for param_ws in peak_params[1:]: - ConjoinWorkspaces(temp_peak_ws, param_ws, False) - temp_workspaces.append(temp_peak_ws) - - #join all peaks into a single workspace - temp_workspace = temp_workspaces[0] - for temp_ws in temp_workspaces[1:]: - ConjoinWorkspaces(temp_workspace, temp_peak_ws, False) - - RenameWorkspace(temp_workspace, OutputWorkspace=output_name) - - #replace axis on workspaces with text axis - axis = TextAxis.create(mtd[output_name].getNumberHistograms()) - workspace_names = [name for sublist in workspace_names for name in sublist] - for i, name in enumerate(workspace_names): - axis.setLabel(i, name) - mtd[output_name].replaceAxis(1, axis) + """ + + # search for any parameters in the table with the given parameter names, + # ignoring their function index and output them to a workspace + workspace_names = [] + for param_name in param_names: + column_names = search_for_fit_params(param_name, params_table) + column_error_names = search_for_fit_params(param_name+'_Err', params_table) + param_workspaces = [] + for name, error_name in zip(column_names, column_error_names): + ConvertTableToMatrixWorkspace(params_table, x_column, name, error_name, OutputWorkspace=name) + param_workspaces.append(name) + workspace_names.append(param_workspaces) + + # transpose list of workspaces, ignoring unequal length of lists + # this handles the case where a parameter occurs only once in the whole workspace + workspace_names = map(list, itertools.izip_longest(*workspace_names)) + workspace_names = [filter(None, sublist) for sublist in workspace_names] + + # join all the parameters for each peak into a single workspace per peak + temp_workspaces = [] + for peak_params in workspace_names: + temp_peak_ws = peak_params[0] + for param_ws in peak_params[1:]: + ConjoinWorkspaces(temp_peak_ws, param_ws, False) + temp_workspaces.append(temp_peak_ws) + + # join all peaks into a single workspace + ##TODO: I'm not sure exactly what this is supposed to do + temp_workspace = temp_workspaces[0] + 
for temp_ws in temp_workspaces[1:]: + ConjoinWorkspaces(temp_workspace, temp_peak_ws, False) + + RenameWorkspace(temp_workspace, OutputWorkspace=output_name) + + # replace axis on workspaces with text axis + axis = TextAxis.create(mtd[output_name].getNumberHistograms()) + workspace_names = [name for sublist in workspace_names for name in sublist] + for i, name in enumerate(workspace_names): + axis.setLabel(i, name) + + mtd[output_name].replaceAxis(1, axis) def addSampleLogs(ws, sample_logs): - """ + """ Add a dictionary of logs to a workspace. The type of the log is inferred by the type of the value passed to the log. @param ws - workspace to add logs too. @param sample_logs - dictionary of logs to append to the workspace. - """ - for key, value in sample_logs.iteritems(): - if isinstance(value, bool): - log_type = 'String' - elif isinstance(value, (int, long, float)): - log_type = 'Number' - else: - log_type = 'String' - - AddSampleLog(Workspace=ws, LogName=key, LogType=log_type, LogText=str(value)) + """ + + for key, value in sample_logs.iteritems(): + if isinstance(value, bool): + log_type = 'String' + elif isinstance(value, (int, long, float)): + log_type = 'Number' + else: + log_type = 'String' + + AddSampleLog(Workspace=ws, LogName=key, LogType=log_type, LogText=str(value)) diff --git a/Code/Mantid/scripts/Inelastic/IndirectImport.py b/Code/Mantid/scripts/Inelastic/IndirectImport.py index 86c480f9fca1..93af6fcc2758 100644 --- a/Code/Mantid/scripts/Inelastic/IndirectImport.py +++ b/Code/Mantid/scripts/Inelastic/IndirectImport.py @@ -9,16 +9,17 @@ import sys from mantid import logger + def import_mantidplot(): - ''' Currently, all scripts in the PythonAlgorithms directory are imported - during system tests. Unfortunately, these tests are run outside of - MantidPlot and so are incompatible with scripts that import the + ''' Currently, all scripts in the PythonAlgorithms directory are imported + during system tests. Unfortunately, these tests are run outside of + MantidPlot and so are incompatible with scripts that import the "mantidplot" module. As a result, an error message is dumped to the results log for each PythonAlgorithm in the directory that imports mantidplot, for each and every test. - + Here, we silently catch all ImportErrors so that this does not occur. - + @returns the mantidplot module. ''' try: @@ -57,7 +58,7 @@ def unsupported_message(): def is_supported_f2py_platform(): ''' We check for numpy version, as if Linux we check its distro and version as well. - + @returns True if we are currently on a platform that supports the F2Py libraries, else False. ''' @@ -71,24 +72,24 @@ def is_supported_f2py_platform(): return False def import_f2py(lib_base_name): - ''' Until we can include the compilation process of Indirect F2Py modules + ''' Until we can include the compilation process of Indirect F2Py modules into the automated build of Mantid, we are forced to compile the libraries separately on every platform we wish to support. - + Here, we provide a centralised method through which we can import these modules, which hopefully makes the other Indirect scripts a lot less messy. - - @param lib_base_name :: is the prefix of the library name. For example, - the QLres_lnx64.so and QLres_win32.pyd libraries share the same base name + + @param lib_base_name :: is the prefix of the library name. For example, + the QLres_lnx64.so and QLres_win32.pyd libraries share the same base name of "QLres". - + @returns the imported module. 
''' # Only proceed if we are indeed on one of the supported platforms. assert is_supported_f2py_platform() - + lib_name = lib_base_name + _lib_suffix() - + return __import__(lib_name) def run_f2py_compatibility_test():
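
To show how other Indirect scripts are expected to pick up these helpers after this change, here is a short sketch that follows the same pattern the patch introduces at the top of IndirectAbsCor.py; the QLres base name is taken from the docstring above and stands in for any compiled F2Py module.

    from IndirectImport import (import_f2py, is_supported_f2py_platform,
                                unsupported_message)

    # Bind the compiled library to an upper-case module-level constant, as the
    # patch does for CYLABS, and bail out early on unsupported platforms.
    if is_supported_f2py_platform():
        QLRES = import_f2py("QLres")
    else:
        unsupported_message()
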