# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=no-init,invalid-name,bare-except
import math
import time
import mantid
from mantid.api import *
from mantid.simpleapi import *
from mantid.kernel import *


class LRReflectivityOutput(PythonAlgorithm):
    def category(self):
        return "Reflectometry\\SNS"

    def name(self):
        return "LRReflectivityOutput"

    def version(self):
        return 1

    def summary(self):
        return "Produce a single reflectivity curve from multiple reflectivity ranges."

    def PyInit(self):
        self.declareProperty(StringArrayProperty("ReducedWorkspaces", [], direction=Direction.Input),
                             "List of workspace names of reduced reflectivity parts to be put together")
        self.declareProperty("SpecularCutoff", 0.01, "Q-value below which the data is considered to be on the specular ridge")
        self.declareProperty("ScaleToUnity", True, "If true, the reflectivity below the given Q cutoff will be scaled to 1")
        self.declareProperty("ScalingWavelengthCutoff", 10.0, "Wavelength above which the scaling factors are assumed to be one")
        self.declareProperty(FloatArrayProperty("OutputBinning", [0.005, -0.01, 1.0], direction=Direction.Input))
        self.declareProperty("DQConstant", 0.0004, "Constant factor for the resolution dQ = dQ0 + Q dQ/Q")
        self.declareProperty("DQSlope", 0.025, "Slope for the resolution dQ = dQ0 + Q dQ/Q")
        self.declareProperty(FileProperty('OutputFilename', '', action=FileAction.Save, extensions=["txt"]),
                             doc='Name of the reflectivity file output')
        self.declareProperty("MetaData", "", "Additional meta-data to add to the top of the output file")

    def PyExec(self):
        # Check that all the input workspaces are scaled
        workspace_list = self.getProperty("ReducedWorkspaces").value
        if not self.check_scaling(workspace_list):
            logger.error("Absolute normalization not available!")

        # Put the workspaces together
        self.average_points_for_single_q(workspace_list)

    def check_scaling(self, workspace_list):
        """
        Check that all the workspaces are on an absolute scale.
        @param workspace_list: list of workspaces to put together
        """
        scaling_cutoff = self.getProperty("ScalingWavelengthCutoff").value
        normalization_available = True
        for ws in workspace_list:
            if mtd[ws].getRun().hasProperty("isSFfound"):
                if mtd[ws].getRun().getProperty("isSFfound").value == 'False':
                    try:
                        wl = mtd[ws].getRun().getProperty("LambdaRequest").value[0]
                        # Scaling factors above the wavelength cutoff are assumed to be 1
                        if wl > scaling_cutoff:
                            logger.notice("%s: no normalization needed for wl=%s" % (ws, str(wl)))
                        else:
                            logger.error("%s: normalization missing for wl=%s" % (ws, str(wl)))
                            normalization_available = False
                    except:
                        logger.error("%s: could not find LambdaRequest" % ws)
                        normalization_available = False
                else:
                    logger.notice("%s: normalization found" % ws)
            else:
                logger.error("%s: no normalization info" % ws)
                normalization_available = False

        return normalization_available

    # pylint: disable=too-many-locals,too-many-branches
    def average_points_for_single_q(self, scaled_ws_list):  # noqa
        """
        Take the point with the smallest error when multiple points are
        at the same q-value.
        This code was originally part of the REFL UI.
        @param scaled_ws_list: list of scaled workspaces to combine
        """
        # Get binning parameters
        binning_parameters = self.getProperty("OutputBinning").value

        header_list = ("DataRun", "NormRun", "TwoTheta(deg)", "LambdaMin(A)",
                       "LambdaMax(A)", "Qmin(1/A)", "Qmax(1/A)", "SF_A", "SF_B", "PrimaryFrac")
        header_info = "# %-9s %-9s %-14s %-14s %-12s %-12s %-12s %-12s %-12s %-12s\n" % header_list

        # Convert each histo to histograms and rebin to final binning
        for ws in scaled_ws_list:
            new_name = "%s_histo" % ws
            # ConvertToHistogram(InputWorkspace=ws, OutputWorkspace=new_name)
            mtd[ws].setDistribution(True)
            Rebin(InputWorkspace=ws, Params=binning_parameters,
                  OutputWorkspace=new_name)

            # Gather info for meta data header
            def _get_value(name, default=None):
                if mtd[new_name].getRun().hasProperty(name):
                    return mtd[new_name].getRun().getProperty(name).value
                return default

            data_run = mtd[new_name].getRun().getProperty("run_number").value
            norm_run = mtd[new_name].getRun().getProperty("normalization_run").value
            two_theta = mtd[new_name].getRun().getProperty("two_theta").value
            lambda_min = mtd[new_name].getRun().getProperty("lambda_min").value
            lambda_max = mtd[new_name].getRun().getProperty("lambda_max").value
            data_q_min = mtd[new_name].getRun().getProperty("q_min").value
            data_q_max = mtd[new_name].getRun().getProperty("q_max").value
            primary_fraction = mtd[new_name].getRun().getProperty("primary_fraction").value
            scaling_factor_a = _get_value("scaling_factor_a", 1.0)
            scaling_factor_b = _get_value("scaling_factor_b", 0.0)

            value_list = (data_run, norm_run, two_theta, lambda_min, lambda_max, data_q_min,
                          data_q_max, scaling_factor_a, scaling_factor_b, primary_fraction)
            header_info += "# %-9s %-9s %-14.6g %-14.6g %-12.6g %-12.6s %-12.6s %-12.6s %-12.6s %-12.6s\n" % value_list

        # Take the first rebinned histo as our output
        data_x = mtd[scaled_ws_list[0] + '_histo'].dataX(0)
        data_y = mtd[scaled_ws_list[0] + '_histo'].dataY(0)
        data_e = mtd[scaled_ws_list[0] + '_histo'].dataE(0)

        # Skip first point and last one
        points_to_skip = 1
        for i in range(1, len(scaled_ws_list)):
            skipped_points = 0
            distribution_started = False

            data_y_i = mtd[scaled_ws_list[i] + '_histo'].dataY(0)
            data_e_i = mtd[scaled_ws_list[i] + '_histo'].dataE(0)
            for j in range(len(data_y_i) - 1):
                # Check whether we need to skip this point
                if data_y_i[j] > 0:
                    distribution_started = True
                    if skipped_points < points_to_skip:
                        skipped_points += 1
                        continue

                # If this is the last point of the distribution, skip it
                if distribution_started and data_y_i[j + 1] == 0 and data_e_i[j + 1] == 0:
                    break

                if data_y_i[j] > 0:
                    if data_y[j] > 0:
                        denom = 1.0 / data_e[j]**2 + 1.0 / data_e_i[j]**2
                        data_y[j] = (data_y[j] / data_e[j]**2 + data_y_i[j] / data_e_i[j]**2) / denom
                        data_e[j] = math.sqrt(1.0 / denom)
                        # data_y[j] = 0.5 * (data_y[j] + data_y_i[j])
                        # data_e[j] = 0.5 * math.sqrt(data_e[j] * data_e[j] + data_e_i[j] * data_e_i[j])
                    else:
                        data_y[j] = data_y_i[j]
                        data_e[j] = data_e_i[j]

        # Skip the first point
        for i in range(len(data_y)):
            if data_y[i] > 0:
                data_y[i] = 0.0
                break

        # Scale to unity
        scale_to_unity = self.getProperty("ScaleToUnity").value
        specular_cutoff = self.getProperty("SpecularCutoff").value

        scaling_factor = 1.0
        if scale_to_unity is True:
            y_values = []
            e_values = []
            for i in range(len(data_y)):
                if data_y[i] > 0 and data_x[i] < specular_cutoff:
                    y_values.append(data_y[i])
                    e_values.append(data_e[i])

            # Compute the scaling factor to bring the specular ridge to 1
            total = 0.0
            weights = 0.0
            for i in range(len(y_values)):
                w = 1.0 / e_values[i]**2
                total += w * y_values[i]
                weights += w
            if weights > 0:
                scaling_factor = total / weights

        Scale(InputWorkspace=scaled_ws_list[0] + '_histo', OutputWorkspace=scaled_ws_list[0] + '_scaled',
              Factor=1.0 / scaling_factor, Operation='Multiply')

        # Save the data
        file_path = self.getProperty("OutputFilename").value
        dq0 = self.getProperty("DQConstant").value
        dq_over_q = self.getProperty("DQSlope").value
        meta_data = self.getProperty("MetaData").value

        data_x = mtd[scaled_ws_list[0] + '_scaled'].dataX(0)
        data_y = mtd[scaled_ws_list[0] + '_scaled'].dataY(0)
        data_e = mtd[scaled_ws_list[0] + '_scaled'].dataE(0)

        start_time = mtd[scaled_ws_list[0] + '_scaled'].getRun().getProperty("start_time").value
        experiment = mtd[scaled_ws_list[0] + '_scaled'].getRun().getProperty("experiment_identifier").value
        run_number = mtd[scaled_ws_list[0] + '_scaled'].getRun().getProperty("run_number").value
        run_title = mtd[scaled_ws_list[0] + '_scaled'].getTitle()

        content = '# Experiment %s Run %s\n' % (experiment, run_number)
        content += '# Run title: %s\n' % run_title
        content += '# Run start time: %s\n' % start_time
        content += '# Reduction time: %s\n' % time.ctime()
        content += '# Mantid version: %s\n' % mantid.__version__
        content += '# Scaling factor: %s\n' % scaling_factor
        content += header_info

        try:
            if len(meta_data.strip()) > 0:
                content += '#\n'
                lines = meta_data.strip().split('\n')
                for l in lines:
                    content += '# %s\n' % l
                content += '#\n'
        except:
            logger.error("Could not write meta-data to reflectivity file.")

        content += '# dQ0[1/Angstrom] = %g\n' % dq0
        content += '# dQ/Q = %g\n' % dq_over_q
        content += '# Q[1/Angstrom] R delta_R Precision\n'

        for i in range(len(data_x)):
            # Skip point where the error is larger than the reflectivity value
            if data_y[i] > data_e[i]:
                content += str(data_x[i])
                content += ' ' + str(data_y[i])
                content += ' ' + str(data_e[i])
                _precision = str(dq0 + dq_over_q * data_x[i])
                content += ' ' + _precision
                content += '\n'

        f = open(file_path, 'w')
        f.write(content)
        f.close()

        for ws in scaled_ws_list:
            if AnalysisDataService.doesExist(ws + '_histo'):
                AnalysisDataService.remove(ws + '_histo')
            if AnalysisDataService.doesExist(ws + '_scaled'):
                AnalysisDataService.remove(ws + '_scaled')


AlgorithmFactory.subscribe(LRReflectivityOutput)
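
# Example usage, kept as comments so the module stays importable. This is an
# illustrative sketch only: the workspace names, file path, and parameter values
# below are assumptions, not taken from this file. Once the algorithm is
# registered, Mantid's simpleapi exposes it as a regular Python function.
#
#   from mantid.simpleapi import LRReflectivityOutput
#
#   LRReflectivityOutput(ReducedWorkspaces=["r_part_1", "r_part_2"],
#                        SpecularCutoff=0.01,
#                        ScaleToUnity=True,
#                        OutputBinning=[0.005, -0.01, 1.0],
#                        DQConstant=0.0004,
#                        DQSlope=0.025,
#                        OutputFilename="/tmp/combined_reflectivity.txt")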