Skip to content

Commit

Permalink
Merge remote branch 'origin/feature/8566_scd_reduce_config'
Browse files Browse the repository at this point in the history
  • Loading branch information
VickieLynch committed Dec 9, 2013
2 parents 670b76d + 1d4e215 commit 902164c
Show file tree
Hide file tree
Showing 4 changed files with 224 additions and 34 deletions.
53 changes: 33 additions & 20 deletions Code/Mantid/scripts/SCD_Reduction/ReduceDictionary.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,33 +12,46 @@
# The run numbers themselves may be specified as a comma separated list of
# individual run numbers, or ranges specified with a colon separator.
#
def LoadDictionary( filename ):
params_dictionary = {}
run_nums = []
file = open(filename)
def LoadDictionary( *filenames, **kwargs ):
# create a dictionary to load into
params_dictionary = kwargs.get("existing", {})
# create a list of run numbers
run_nums = params_dictionary.get("run_nums", [])

file = open(filenames[0])
for line in file:
line = line.strip();
line = line.rstrip();
if (not line.startswith('#')) and len(line) > 2:
words = line.split()
if len(words) > 1:
if words[1] == "None":
params_dictionary[words[0]] = None
elif words[1] == "True":
params_dictionary[words[0]] = True
elif words[1] == "False":
params_dictionary[words[0]] = False
elif words[0] == "run_nums":
run_list = ParseRunList(words[1])
for i in range(0,len(run_list)):
run_nums.append(run_list[i])
else:
params_dictionary[words[0]] = words[1]
else:
# error check the number of values
if len(words) < 2:
print "Syntax Error On Line: " + line
# set the value
else:
(key, value) = words[0:2]

# fix up special values
if value.lower() == "none":
value = None
elif value.lower() == "true":
value = True
elif value.lower() == "false":
value = False

# set the values
if key == "run_nums":
run_nums.extend(ParseRunList(value))
else:
params_dictionary[key] = value

params_dictionary["run_nums"]=run_nums

params_dictionary["run_nums"]=run_nums
return params_dictionary;
# it isn't awesome without recursion
if len(filenames) > 1:
return LoadDictionary(*filenames[1:], existing=params_dictionary)
else:
return params_dictionary;

#
# Return a list of run numbers from a string containing a comma separated
Expand Down
20 changes: 10 additions & 10 deletions Code/Mantid/scripts/SCD_Reduction/ReduceSCD_OneRun.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,17 +52,17 @@
# Get the config file name and the run number to process from the command line
#
if (len(sys.argv) < 3):
print "You MUST give the config file name and run number on the command line"
print "You MUST give the config file name(s) and run number on the command line"
exit(0)

config_file_name = sys.argv[1]
run = sys.argv[2]
config_files = sys.argv[1:-1]
run = sys.argv[-1]

#
# Load the parameter names and values from the specified configuration file
# into a dictionary and set all the required parameters from the dictionary.
#
params_dictionary = ReduceDictionary.LoadDictionary( config_file_name )
params_dictionary = ReduceDictionary.LoadDictionary( *config_files )

instrument_name = params_dictionary[ "instrument_name" ]
calibration_file_1 = params_dictionary.get('calibration_file_1', None)
Expand Down Expand Up @@ -90,7 +90,7 @@
use_sphere_integration = params_dictionary.get('use_sphere_integration', True)
use_ellipse_integration = params_dictionary.get('use_ellipse_integration', False)
use_fit_peaks_integration = params_dictionary.get('use_fit_peaks_integration', False)
use_cylinder_integration = params_dictionary.get('use_cylinder_integration', False)
use_cylindrical_integration = params_dictionary.get('use_cylindrical_integration', False)

peak_radius = params_dictionary[ "peak_radius" ]
bkg_inner_radius = params_dictionary[ "bkg_inner_radius" ]
Expand Down Expand Up @@ -254,7 +254,7 @@
BackgroundInnerRadius=bkg_inner_radius,
PeaksWorkspace=peaks_ws,
IntegrateIfOnEdge=integrate_if_edge_peak )
elif use_cylinder_integration:
elif use_cylindrical_integration:
#
# Integrate found or predicted peaks in Q space using spheres, and save
# integrated intensities, with Niggli indexing. First get an un-weighted
Expand All @@ -272,7 +272,7 @@
BackgroundInnerRadius=bkg_inner_radius,
PeaksWorkspace=peaks_ws,
IntegrateIfOnEdge=integrate_if_edge_peak,
Cylinder=use_cylinder_integration,CylinderLength=cylinder_length,
Cylinder=use_cylindrical_integration,CylinderLength=cylinder_length,
PercentBackground=cylinder_percent_bkg,
IntegrationOption=cylinder_int_option,
ProfileFunction=cylinder_profile_fit)
Expand All @@ -293,7 +293,7 @@
BackgroundOuterSize = bkg_outer_radius,
BackgroundInnerSize = bkg_inner_radius )

elif use_cylinder_integration:
elif use_cylindrical_integration:
profiles_filename = output_directory + "/" + instrument_name + '_' + run + '.profiles'
MDEW = ConvertToMD( InputWorkspace=event_ws, QDimensions="Q3D",
dEAnalysisMode="Elastic", QConversionScales="Q in A^-1",
Expand All @@ -319,7 +319,7 @@
Filename=run_niggli_integrate_file )

# Print warning if user is trying to integrate using the cylindrical method and transform the cell
if use_cylinder_integration:
if use_cylindrical_integration:
if (not cell_type is None) or (not centering is None):
print "WARNING: Cylindrical profiles are NOT transformed!!!"
#
Expand All @@ -341,7 +341,7 @@

end_time = time.time()
print '\nReduced run ' + str(run) + ' in ' + str(end_time - start_time) + ' sec'
print 'using config file ' + config_file_name
print 'using config file(s) ' + ", ".join(config_files)

#
# Try to get this to terminate when run by ReduceSCD_Parallel.py, from NX session
Expand Down
8 changes: 4 additions & 4 deletions Code/Mantid/scripts/SCD_Reduction/ReduceSCD_Parallel.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,14 +72,14 @@ def run ( self ):
print "You MUST give the config file name on the command line"
exit(0)

config_file_name = sys.argv[1]
config_files = sys.argv[1:]

#
# Load the parameter names and values from the specified configuration file
# into a dictionary and set all the required parameters from the dictionary.
#

params_dictionary = ReduceDictionary.LoadDictionary( config_file_name )
params_dictionary = ReduceDictionary.LoadDictionary( *config_files )

exp_name = params_dictionary[ "exp_name" ]
output_directory = params_dictionary[ "output_directory" ]
Expand Down Expand Up @@ -115,7 +115,7 @@ def run ( self ):
index = 0
for r_num in run_nums:
list.append( ProcessThread() )
cmd = '%s %s %s %s' % (python, reduce_one_run_script, config_file_name, str(r_num))
cmd = '%s %s %s %s' % (python, reduce_one_run_script, " ".join(config_files), str(r_num))
if slurm_queue_name is not None:
console_file = output_directory + "/" + str(r_num) + "_output.txt"
cmd = 'srun -p ' + slurm_queue_name + \
Expand Down Expand Up @@ -262,6 +262,6 @@ def run ( self ):
print "**************************************************************************************\n"

print 'Total time: ' + str(end_time - start_time) + ' sec'
print 'Connfig file: ' + config_file_name
print 'Config file: ' + ", ".join(config_files)
print 'Script file: ' + reduce_one_run_script + '\n'
print
177 changes: 177 additions & 0 deletions Code/Mantid/scripts/SCD_Reduction/TOPAZ.config
Original file line number Diff line number Diff line change
@@ -0,0 +1,177 @@
# Default configuration file for TOPAZ
instrument_name TOPAZ # prefix for run file names

#
# Specify calibration file(s). SNAP requires two calibration files, one
# for each bank. If the default detector position is to be used, specify
# None as the calibration file name.
#
calibration_file_1 None
calibration_file_2 None

#
# Set the data_directory to None to use findnexus to get the run file when
# running this on the SNS systems. On other systems, all of the input files
# must be copied into one directory and that directory must be specified as
# the data_directory
#
data_directory None
output_directory None

#
# Min & max tof determine the range of events loaded.
# Max Q determines the range of Q values that will be mapped to
# reciprocal space.
# Min & max monitor tof determine the range of tofs integrated
# in the monitor data to get the total monitor counts
#
min_tof 1000
max_tof 16666
max_Q 20
monitor_index 0
min_monitor_tof 1000
max_monitor_tof 12500

#
# Read the UB matrix from file. This option will be applied to each run and
# used for combined file. This option is especially helpful for 2nd frame
# TOPAZ data.
read_UB False
UB_filename None

# Use FindUBUsingLatticeParameters to optimize the given UB for each run?
optimize_UB True

# Use FindUBUsingLatticeParameters to find a common UB (instead of FFT).
# This option will find the UB for the first run and the cell parameters in the
# algorithm, unless a UB has been specified: in this case the values in the
# specified file will be used.
UseFirstLattice True

#
# Specify a conventional cell type and centering. If these are None, only
# one .mat and .integrate file will be written for this run, and they will
# be in terms of the Niggli reduced cell. If these specify a valid
# cell type and centering, an additional .mat and .integrate file will be
# written for the specified cell_type and centering. NOTE: If run in
# parallel, the driving script will only read the Niggli version of the
# .integrate file, and will combine, re-index and convert to a conventional
# cell, so these can usually be left as None.
#
# Cell transformation is not applied to cylindrical profiles,
# i.e. use None if cylindrical integration is used!
#
cell_type None
centering None

#
# Number of peaks to find, per run, both for getting the UB matrix,
# AND to determine how many peaks are integrated, if peak positions are
# NOT predicted. NOTE: This number must be chosen carefully. If too
# many peaks are requested, find peaks will take a very long time and
# the returned peaks will probably not even index, since most of them
# will be "noise" peaks. If too few are requested, then there will be
# few peaks to be integrated, and the UB matrix may not be as accurate
# as it should be for predicting peaks to integrate.
#
num_peaks_to_find 500

#
# min_d, max_d and tolerance control indexing peaks. max_d is also
# used to specify a threshold for the separation between peaks
# returned by FindPeaksMD, so it should be specified somewhat larger
# than the largest cell edge in the Niggli reduced cell for the
# sample.
#
min_d 4
max_d 8
tolerance 0.12

#
# If predicted peak positions are to be integrated,
# the integrate_predicted_peaks flag should be set to True and the range
# of wavelengths and d-spacings must be specified
#
integrate_predicted_peaks False
min_pred_wl 0.25
max_pred_wl 3.5
min_pred_dspacing 0.2
max_pred_dspacing 8.5

#
# Select only ONE of the following integration methods, by setting the
# use_*****_integration flag True.
#
# One of these will be set to True in the experiment config
use_sphere_integration False
use_ellipse_integration False
use_fit_peaks_integration False
use_cylindrical_integration False

#
# Specify sphere and ellipse integration control parameters. Check that these
# are correct, if use_sphere_integration, or use_ellipse_integration is True.
# Otherwise the values aren't used.
#
peak_radius 0.075 # for sphere integration only
bkg_inner_radius 0.075 # for sphere or ellipse integration
bkg_outer_radius 0.095 # for sphere or ellipse integration
integrate_if_edge_peak True # for sphere integration only

#
# Specify ellipse integration control parameters
#
ellipse_region_radius 0.16
ellipse_size_specified False

#
# Specify fit peaks integration control parameters. Check that these are
# correct, if use_fit_peaks_integration = True. Otherwise the values
# aren't used.
#
rebin_step -0.004
preserve_events True
use_ikeda_carpenter False
n_bad_edge_pixels 0

#
# Specify cylindrical integration control parameters
#
cylinder_radius 0.05
cylinder_length 0.30
cylinder_percent_bkg 20.0
cylinder_int_option GaussianQuadrature
cylinder_profile_fit Gaussian

# ==========================================================================
# Additional Parameters needed by ReduceSCD_Parallel.py, to process
# multiple runs in parallel.
# ==========================================================================
#
exp_name None
reduce_one_run_script ReduceSCD_OneRun.py

#
# Specify the run numbers that should be reduced. This can be done on several
# lines. Each line must start with the parameter name run_nums and be followed
# by a comma separated list of individual run numbers or ranges of run numbers.
# A range of run numbers is specified by listing the first number and last
# number in the range, separated by a colon.
#
#run_nums 8525:8551

#
# Specify the slurm partition, or None to use local processes. The parameter
# max_processes controls the maximum number of processes that will be run
# simultaneously locally, or that will be simultaneously submitted to slurm.
# The value of max_processes should be chosen carefully with the size of the
# system in mind, to avoid overloading the system. Since the lower level
# calculations are all multi-threaded, this should be substantially lower than
# the total number of cores available.
# All runs will be processed eventually. If there are more runs than then
# max_processes, as some processes finish, new ones will be started, until
# all runs have been processed.
#
#slurm_queue_name topazq
#slurm_queue_name None
#max_processes 13

0 comments on commit 902164c

Please sign in to comment.