Started to implement the automatic scaling factor inside the reduction for a given lambda requested. This refs #4303
JeanBilheux committed Mar 6, 2012
1 parent 88a7108 commit 7bfd240
Showing 3 changed files with 81 additions and 28 deletions.
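
The thread running through all three changed files is a per-wavelength naming convention for the scaling-factor file: instead of a single hard-coded SFcalculator.txt, the reduction writes and looks up a file whose name embeds the requested lambda, so one directory can hold a calibration per wavelength. A minimal sketch of that convention (the helper name below is illustrative and not part of the commit; os.path.join stands in for the plain string concatenation the committed code uses):

import os

def sf_calculator_file(sf_calculator_path, lambda_requested):
    # mirrors the 'SFcalculator_lr' + str(lambda) + '.txt' pattern
    # used by both the writer (calculate) and the reader (applySF)
    return os.path.join(sf_calculator_path,
                        'SFcalculator_lr' + str(lambda_requested) + '.txt')

# e.g. sf_calculator_file('/home/j35/Desktop/', 1.5)
#      -> '/home/j35/Desktop/SFcalculator_lr1.5.txt'

Both ends must be pointed at the same directory (sfCalculatorPath) for the lookup to succeed.
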
39 changes: 23 additions & 16 deletions Code/Mantid/Framework/PythonAPI/PythonAlgorithms/RefLReduction.py
@@ -98,7 +98,7 @@ def PyExec(self):
subtract_norm_bck = self.getProperty("SubtractNormBackground")

#name of the sfCalculator txt file
sfCalculator = "/home/j35/Desktop/SFcalculator.txt"
sfCalculatorPath = "/home/j35/Desktop/"
slitsValuePrecision = 0.1 #precision of slits = 10%

# Pick a good workspace name
@@ -143,10 +143,6 @@ def PyExec(self):
if not mtd.workspaceExists(ws_event_data):
LoadEventNexus(Filename=data_file, OutputWorkspace=ws_event_data)

print 'Data File(s) #'
print run_numbers


# Get metadata
mt_run = mtd[ws_event_data].getRun()
##get angles value
@@ -307,19 +303,24 @@ def PyExec(self):
ws_norm_histo_data = ws_name+"_histo"

if not mtd.workspaceExists(ws_norm_event_data):
LoadEventNexus(norm_file, ws_norm_event_data)

LoadEventNexus(Filename=norm_file,
OutputWorkspace=ws_norm_event_data)

# Rebin data
Rebin(InputWorkspace=ws_norm_event_data, OutputWorkspace=ws_norm_histo_data,
Rebin(InputWorkspace=ws_norm_event_data,
OutputWorkspace=ws_norm_histo_data,
Params=[TOFrange[0],
TOFsteps,
TOFrange[1]])

# Keep only range of TOF of interest
CropWorkspace(ws_norm_histo_data, ws_norm_histo_data, XMin=TOFrange[0], XMax=TOFrange[1])
CropWorkspace(InputWorkspace=ws_norm_histo_data,
OutputWorkspace=ws_norm_histo_data,
XMin=TOFrange[0], XMax=TOFrange[1])

# Normalized by Current (proton charge)
NormaliseByCurrent(InputWorkspace=ws_norm_histo_data, OutputWorkspace=ws_norm_histo_data)
NormaliseByCurrent(InputWorkspace=ws_norm_histo_data,
OutputWorkspace=ws_norm_histo_data)

#Create a new event workspace of only the range of pixel of interest
#background range (along the y-axis) and of only the pixel
@@ -365,8 +366,10 @@ def PyExec(self):
Transpose(InputWorkspace=ws_transposed,
OutputWorkspace=ws_data_bck_2)

ConvertToHistogram(ws_data_bck_1, OutputWorkspace=ws_data_bck_1)
ConvertToHistogram(ws_data_bck_2, OutputWorkspace=ws_data_bck_2)
ConvertToHistogram(InputWorkspace=ws_data_bck_1,
OutputWorkspace=ws_data_bck_1)
ConvertToHistogram(InputWorkspace=ws_data_bck_2,
OutputWorkspace=ws_data_bck_2)

RebinToWorkspace(WorkspaceToRebin=ws_data_bck_1,
WorkspaceToMatch=ws_integrated_data,
@@ -376,10 +379,14 @@
WorkspaceToMatch=ws_integrated_data,
OutputWorkspace=ws_data_bck_2)

WeightedMean(ws_data_bck_1, ws_data_bck_2, ws_data_bck)
WeightedMean(InputWorkspace1=ws_data_bck_1,
InputWorkspace2=ws_data_bck_2,
OutputWorkspace=ws_data_bck)

ws_norm = "_NormWks"
Minus(ws_integrated_data, ws_data_bck, OutputWorkspace=ws_norm)
Minus(LHSWorkspace=ws_integrated_data,
RHSWorkspace=ws_data_bck,
OutputWorkspace=ws_norm)

#Clean up intermediary workspaces
# mtd.deleteWorkspace(ws_data_bck)
@@ -419,7 +426,7 @@ def PyExec(self):
# this is where we need to apply the scaling factor
ws_data_scaled = wks_utility.applySF(ws_data,
slitsValuePrecision,
sfCalculatorFile=sfCalculator)
sfCalculatorPath=sfCalculatorPath)

if dMD is not None and theta is not None:

Second changed file
@@ -403,6 +403,14 @@ def createIndividualList(string_list_files):
return {'list_runs':list_runs,
'list_attenuator':list_attenuator}

def getLambdaValue(mt):
"""
return the lambdaRequest value
"""
mt_run = mt.getRun()
_lambda = mt_run.getProperty('LambdaRequest').value
return _lambda

def getSh(mt, top_tag, bottom_tag):
"""
returns the height and units of the given slit#
@@ -432,9 +440,9 @@ def getS2h(mt=None):
return _h, units
return None, None

def getSlitsValue(full_list_runs, S1H, S2H):
def getSlitsValueAndLambda(full_list_runs, S1H, S2H, lambdaRequest):
"""
Retrieve the S1H and S2H values
Retrieve the S1H, S2H and lambda requested values
"""
_nbr_files = len(full_list_runs)
for i in range(_nbr_files):
@@ -447,6 +455,9 @@ def getSlitsValue(full_list_runs, S1H, S2H):
_s2h_value, _s2h_units = getS2h(mt1)
S1H[i] = _s1h_value
S2H[i] = _s2h_value

_lambda_value = getLambdaValue(mt1)
lambdaRequest[i] = _lambda_value

def isRunsSorted(list_runs, S1H, S2H):
"""
@@ -500,7 +511,7 @@ def calculateAndFit(numerator='',
return cal1

#if __name__ == '__main__':
def calculate(string_runs=None, list_peak_back=None, output_file=None):
def calculate(string_runs=None, list_peak_back=None, output_path=None):
"""
In this current version, the program will automatically calculate
the scaling function for up to, and including, 6 attenuators.
@@ -519,7 +530,7 @@ def calculate(string_runs=None, list_peak_back=None, output_file=None):
[peak_min_run2, peak_max_run2, back_min_run2, back_max_run2],
[...]]
output_file = full path to output file name (folder must exist)
output_path = where the scaling factor files will be written
"""

@@ -571,10 +582,24 @@ def calculate(string_runs=None, list_peak_back=None, output_file=None):
#####

#retrieve the S1H and S2H val/units for each NeXus
#retrieve the lambdaRequest value (Angstrom)
S1H = {}
S2H = {}
getSlitsValue(list_runs, S1H, S2H)

lambdaRequest = {}
getSlitsValueAndLambda(list_runs, S1H, S2H, lambdaRequest)

#Make sure all the lambdaRequested are identical within a given range
lambdaRequestPrecision = 0.01 #1%
_lr = lambdaRequest[0]
for i in lambdaRequest:
_localValue = float(lambdaRequest[i][0])
_localValueRate = lambdaRequestPrecision * _localValue
_leftValue = _localValue - _localValueRate
_rightValue = _localValue + _localValueRate

if (_localValue < _leftValue) or (_localValue > _rightValue):
raise Exception("lambda requested do not match !")

#make sure the files are sorted from smaller to bigger opening
if isRunsSorted(list_runs, S1H, S2H):

@@ -627,7 +652,6 @@ def calculate(string_runs=None, list_peak_back=None, output_file=None):
list_objects=list_objects)

recordSettings(a, b, error_a, error_b, name, cal)
# plotObject(cal)

if (i < (len(list_runs) - 1) and
list_attenuator[i + 1] == (_attenuator+1)):
@@ -636,10 +660,14 @@ def calculate(string_runs=None, list_peak_back=None, output_file=None):
#record S1H and S2H
finalS1H.append(S1H[index_numerator])
finalS2H.append(S2H[index_numerator])

#output the fitting parameters in an ascii
if (output_file is None):
output_file = '/home/j35/Desktop/SFcalculator.txt'
if (output_path is None):
output_path = '/home/j35/Desktop/'

output_pre = 'SFcalculator_lr' + str(lambdaRequest[0][0])
output_ext = '.txt'
output_file = output_path + output_pre + output_ext

outputFittingParameters(a, b, error_a, error_b, finalS1H, finalS2H, output_file)
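
Runs taken at different requested wavelengths should not be folded into one scaling-factor fit, so calculate() now collects every run's LambdaRequest through getSlitsValueAndLambda() and is meant to reject any mismatch beyond the 1% tolerance. A standalone sketch of such a check, written here to compare each run against the first run's value (the function name and the plain list input are assumptions, not part of the commit):

def check_lambda_consistency(lambda_values, precision=0.01):
    # lambda_values: one requested-lambda value (in Angstroms) per run
    # precision: fractional tolerance, 0.01 == 1% as in lambdaRequestPrecision above
    reference = float(lambda_values[0])
    tolerance = precision * reference
    for value in lambda_values:
        if abs(float(value) - reference) > tolerance:
            raise ValueError("requested lambdas do not match within %g%%" % (precision * 100))
    return reference

# e.g. check_lambda_consistency([1.500, 1.505, 1.495]) -> 1.5
#      check_lambda_consistency([1.5, 1.7]) raises ValueError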

Third changed file
@@ -57,6 +57,14 @@ def getS2h(mt=None):
return _h, units
return None, None

def getLambdaValue(mt):
"""
return the lambdaRequest value
"""
mt_run = mt.getRun()
_lambda = mt_run.getProperty('LambdaRequest').value
return _lambda

def getPixelXPixelY(mt1, maxX=304, maxY=256):
"""
returns the PixelX_vs_PixelY array of the workspace data specified
@@ -784,13 +792,23 @@ def calc_center_of_mass(arr_x, arr_y, A):

def applySF(InputWorkspace,
slitsValuePrecision,
sfCalculatorFile=None):
sfCalculatorPath=os.path.abspath('.')):
"""
Function that applies the scaling factor to the data using the sfCalculator.txt
file created by the sfCalculator procedure
"""

if (sfCalculatorFile is not None) and (os.path.isfile(sfCalculatorFile)):
#retrieve the lambdaRequested and check if we can find the sfCalculator
#file corresponding to that lambda
_lr = getLambdaValue(mtd[InputWorkspace])
_lr_value = _lr[0]

output_path = sfCalculatorPath
output_pre = 'SFcalculator_lr' + str(_lr_value)
output_ext = '.txt'
sfCalculatorFile = output_path + output_pre + output_ext

if (os.path.isfile(sfCalculatorFile)):

#parse file and put info into array
f = open(sfCalculatorFile,'r')
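
On the reading side, applySF() no longer needs to be handed a file name: it pulls LambdaRequest out of the workspace run log, builds the matching file name under sfCalculatorPath, and only proceeds if that file exists. A rough sketch of the lookup and parse, under stated assumptions (the helper name is illustrative, the workspace handle is whatever mtd[...] returns, and each non-empty line of the file is only assumed to carry one set of the fitted parameters written by outputFittingParameters):

import os

def load_scaling_factors(workspace, sf_calculator_path):
    # requested wavelength, read from the run log as in getLambdaValue() above
    lambda_requested = workspace.getRun().getProperty('LambdaRequest').value[0]
    sf_file = os.path.join(sf_calculator_path,
                           'SFcalculator_lr' + str(lambda_requested) + '.txt')
    if not os.path.isfile(sf_file):
        # no scaling-factor file was produced for this wavelength
        return None
    with open(sf_file) as handle:
        # one set of fitted parameters (a, b, errors, S1H, S2H) per line
        return [line.strip() for line in handle if line.strip()]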
