forked from npshub/mantid
-
Notifications
You must be signed in to change notification settings - Fork 0
/
DirectEnergyConversion.py
1991 lines (1772 loc) · 96.7 KB
/
DirectEnergyConversion.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=too-many-lines
#pylint: disable=invalid-name
#pylint: disable=attribute-defined-outside-init
from mantid.simpleapi import *
from mantid.kernel import funcinspect
from mantid import geometry,api
import os.path
import copy
import math
import time
import numpy as np
import collections
import Direct.CommonFunctions as common
import Direct.diagnostics as diagnostics
from Direct.PropertyManager import PropertyManager
from Direct.RunDescriptor import RunDescriptor
from Direct.ReductionHelpers import extract_non_system_names,process_prop_list
def setup_reducer(inst_name,reload_instrument=False):
    """
    Given an instrument name or prefix this sets up a converter
    object for the reduction. Deprecated method

    :param inst_name: instrument name or prefix to set the reducer up for
    :param reload_instrument: if True, force reloading the instrument definition
    :return: a configured DirectEnergyConversion instance
    :raises RuntimeError: if the instrument is unknown or its IDF file is wrong
    """
    try:
        return DirectEnergyConversion(inst_name,reload_instrument)
    except RuntimeError as err:
        # chain the original exception so the real cause of the failure
        # is preserved in the traceback (PEP 3134) instead of being buried
        raise RuntimeError('Unknown instrument "%s" or wrong IDF file for this instrument, cannot continue'
                           % inst_name) from err
#How could it be that abstract class is not referenced R0921? What it means?
#pylint: disable=too-many-instance-attributes
class DirectEnergyConversion(object):
"""
Performs a convert to energy assuming the provided instrument is
an direct inelastic geometry instrument
The class defines various methods to allow users to convert their
files to Energy transfer
Usage:
>>red = DirectEnergyConversion('InstrumentName')
and then:
>>red.convert_to_energy(wb_run,sample_run,ei_guess,rebin)
or
>>red.convert_to_energy(wb_run,sample_run,ei_guess,rebin,**arguments)
or
>>red.convert_to_energy(wb_run,sample_run,ei_guess,rebin,mapfile,**arguments)
or
>>red.prop_man.sample_run = run number
>>red.prop_man.wb_run = Whitebeam
>>red.prop_man.incident_energy = energy guess
>>red.prop_man.energy_bins = [min_val,step,max_val]
>>red.convert_to_energy()
Where:
Whitebeam run number or file name or workspace
sample_run sample run number or file name or workspace
ei_guess suggested value for incident energy of neutrons in direct inelastic instrument
energy_bins energy binning requested for resulting spe workspace.
mapfile Mapfile -- if absent/'default' the defaults from IDF are used
monovan_run If present will do the absolute units normalization. Number of additional parameters
specified in **kwargs is usually requested for this. If they are absent,
program uses defaults, but the defaults (e.g. sample_mass or sample_rmm )
are usually incorrect for a particular run.
arguments The dictionary containing additional keyword arguments.
The list of allowed additional arguments is defined in InstrName_Parameters.xml
file, located in:
MantidPlot->View->Preferences->Mantid->Directories->Parameter Definitions
Usage examples:
with run numbers as input:
>>red.convert_to_energy(1000,10001,80,[-10,.1,70]) # will run on default instrument
>>red.convert_to_energy(1000,10001,80,[-10,.1,70],'mari_res', additional keywords as required)
>>red.convert_to_energy(1000,10001,80,'-10,.1,70','mari_res',fixei=True)
A detector calibration file must be specified if running the reduction with workspaces as input
namely:
>>w2=red.convert_to_energy('wb_wksp','run_wksp',ei,rebin_params,mapfile,det_cal_file=cal_file
,diag_remove_zero=False,norm_method='current')
All available keywords are provided in InstName_Parameters.xml file
Some samples are:
norm_method =[monitor-1],[monitor-2][Current]
background =False , True
fixei =False , True
save_format =['.spe'],['.nxspe'],'none'
detector_van_range =[20,40] in mev
bkgd_range =[15000,19000] :integration range for background tests
second_white - If provided an additional set of tests is performed on this.
(default = None)
hardmaskPlus - A file specifying those spectra that should be masked
without testing (default=None)
tiny - Minimum threshold for acceptance (default = 1e-10)
large - Maximum threshold for acceptance (default = 1e10)
bkgd_range - A list of two numbers indicating the background range
(default=instrument defaults)
diag_van_median_rate_limit_lo - Lower bound defining outliers as fraction of median value (default = 0.01)
diag_van_median_rate_limit_hi - Upper bound defining outliers as fraction of median value (default = 100.)
diag_van_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0.1)
diag_van_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 1.5)
diag_van_sig - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
difference with respect to the median value must also exceed this number of error bars (default=0.0)
diag_remove_zero - If true then zeroes in the vanadium data will count as failed (default = True)
diag_samp_samp_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0)
diag_samp_samp_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 2.0)
diag_samp_sig - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
difference with respect to the median value must also exceed this number of
error bars (default=3.3)
variation - The number of medians the ratio of the first/second white beam can deviate from
the average by (default=1.1)
bleed_test - If true then the CreatePSDBleedMask algorithm is run
bleed_maxrate - If the bleed test is on then this is the maximum framerate allowed in a tube
bleed_pixels - If the bleed test is on then this is the number of pixels ignored within the
bleed test diagnostic
print_diag_results - If True then the results are printed to the screen
diag_remove_zero =True, False (default):Diag zero counts in background range
bleed=True , turn bleed correction on and off on by default for Merlin and LET
sum =True,False(default) , sum multiple files
det_cal_file= a valid detector block file and path or a raw file. Setting this
will use the detector calibraion from the specified file NOT the
input raw file
mask_run = RunNumber to use for diag instead of the input run number
one2one =True, False :Reduction will not use a mapping file
hardmaskPlus=Filename :load a hardmarkfile and apply together with diag mask
hardmaskOnly=Filename :load a hardmask and use as only mask
"""
#-------------------------------------------------------------------------------
#pylint: disable=too-many-branches
#pylint: disable=too-many-locals
def diagnose(self, white,diag_sample=None,**kwargs):
    """Run diagnostics on the provided workspaces.

    This method does some additional processing before moving on to the diagnostics:
      1) Computes the white beam integrals, converting to energy
      2) Computes the background integral using the instrument defined range
      3) Computes a total count from the sample
    These inputs are passed to the diagnostics functions.

    Required inputs:
      white - A workspace, run number or filepath of a white beam run. A workspace is assumed to
              have simply been loaded and nothing else.
    Optional inputs:
      diag_sample - A workspace, run number or filepath of additional (sample) run used for diagnostics.
                    A workspace is assumed to have simply been loaded and nothing else. (default = None)
      second_white - If provided an additional set of tests is performed on this. (default = None)
      hard_mask   - A file specifying those spectra that should be masked without testing (default=None)

    IDF-based diagnostics parameters:
      tiny       - Minimum threshold for acceptance (default = 1e-10)
      huge       - Maximum threshold for acceptance (default = 1e10)
      background_test_range - A list of two numbers indicating the background range (default=instrument defaults)
      van_out_lo - Lower bound defining outliers as fraction of median value (default = 0.01)
      van_out_hi - Upper bound defining outliers as fraction of median value (default = 100.)
      van_lo     - Fraction of median to consider counting low for the white beam diag (default = 0.1)
      van_hi     - Fraction of median to consider counting high for the white beam diag (default = 1.5)
      van_sig    - Error criterion as a multiple of error bar, i.e. to fail the test, the magnitude of the
                   difference with respect to the median value must also exceed this number of error bars (default=0.0)
      samp_zero  - If true then zeros in the vanadium data will count as failed (default = True)
      samp_lo    - Fraction of median to consider counting low for the white beam diag (default = 0)
      samp_hi    - Fraction of median to consider counting high for the white beam diag (default = 2.0)
      samp_sig   - Error criterion as a multiple of error bar, i.e. to fail the test, the magnitude of the
                   difference with respect to the median value must also exceed this number of error bars (default=3.3)
      variation  - The number of medians the ratio of the first/second white beam can deviate from
                   the average by (default=1.1)
      bleed_test - If true then the CreatePSDBleedMask algorithm is run
      bleed_maxrate - If the bleed test is on then this is the maximum framerate allowed in a tube
      bleed_pixels  - If the bleed test is on then this is the number of pixels ignored within the
                      bleed test diagnostic

    :return: the masking workspace when the caller assigned the result to a variable,
             otherwise None (masks are still stored on the run descriptors)
    """
    # output workspace name: derived from the caller's left-hand-side variable, if any
    try:
        _,r = funcinspect.lhs_info('both')
        out_ws_name = r[0]
    #pylint: disable=bare-except
    except:
        out_ws_name = None
    # modify properties using input arguments
    self.prop_man.set_input_parameters(**kwargs)
    # obtain proper run descriptor in case it is not a run descriptor but
    # something else
    white = self.get_run_descriptor(white)
    if self.use_hard_mask_only:
        # build hard mask
        diag_mask = white.get_masking(1)
        if diag_mask is None:
            # in this peculiar way we can obtain working mask which
            # accounts for initial data grouping in the
            # data file. SNS or 1 to 1 maps may probably avoid this
            # stuff and can load masks directly
            white_data = white.get_ws_clone('white_ws_clone')
            # choose the reference workspace the mask file is resolved against
            if self.prop_man.mapmask_ref_ws is None:
                ref_ws = white.get_workspace()
            else:
                ref_ws = self.prop_man.mapmask_ref_ws
            idf_file = api.ExperimentInfo.getInstrumentFilename(self.instr_name)
            diag_mask = LoadMask(Instrument=idf_file,InputFile=self.hard_mask_file,
                                 OutputWorkspace='hard_mask_ws',RefWorkspace = ref_ws)
            #
            MaskDetectors(Workspace=white_data, MaskedWorkspace=diag_mask)
            white.add_masked_ws(white_data)
            # temporary workspaces are no longer needed
            DeleteWorkspace(Workspace='white_ws_clone')
            DeleteWorkspace(Workspace='hard_mask_ws')
            diag_mask = white.get_masking(1)
        if out_ws_name is not None:
            dm = CloneWorkspace(diag_mask,OutputWorkspace=out_ws_name)
            return dm
        else:
            return None
    # Get the white beam vanadium integrals
    white_integrals = self.do_white(white, None, None) # No grouping yet
    #pylint: disable=access-member-before-definition
    if self.second_white:
        #TODO: fix THIS DOES NOT WORK!
        #pylint: disable=unused-variable
        # second_white = self.second_white
        other_white_integrals = self.do_white(PropertyManager.second_white, None, None) # No grouping yet
        #pylint: disable=attribute-defined-outside-init
        self.second_white = other_white_integrals
    # return all diagnostics parameters
    diag_params = self.prop_man.get_diagnostics_parameters()
    # Get the background/total counts from the sample run if present
    name_to_clean = None
    if diag_sample is not None:
        diag_sample = self.get_run_descriptor(diag_sample)
        sample_mask = diag_sample.get_masking(1)
        if sample_mask is None:
            # If the bleed test is requested then we need to pass in the
            # sample_run as well
            if self.bleed_test:
                # initiate reference to reducer to be able to work with Run
                # Descriptors
                diagnostics.__Reducer__ = self
                diag_params['sample_run'] = diag_sample
            # Set up the background integrals for diagnostic purposes.
            # monitor-2 normalization in multirep mode goes per chunk
            if (PropertyManager.incident_energy.multirep_mode() and self.normalise_method == 'monitor-2')\
                or self.bleed_test: # bleed test below needs no normalization so normalize cloned workspace
                result_ws = diag_sample.get_ws_clone('sample_ws_clone')
                # reuse the normalisation method recorded on the white-beam integrals
                wb_normalization_method = white_integrals.getRun().getLogData('DirectInelasticReductionNormalisedBy').value
                result_ws = self.normalise(result_ws, wb_normalization_method)
                name_to_clean = result_ws.name()
            else:
                result_ws = self.normalise(diag_sample, self.normalise_method)
            #>>>here result workspace is being processed
            #-- not touching result ws
            bkgd_range = self.background_test_range
            bin_size = 2*(bkgd_range[1]-bkgd_range[0])
            background_int = Rebin(result_ws,
                                   Params=[bkgd_range[0],bin_size,bkgd_range[1]],
                                   PreserveEvents=False,FullBinsOnly=False, IgnoreBinErrors=True)
            total_counts = Integration(result_ws, IncludePartialBins=True)
            background_int = ConvertUnits(background_int, Target="Energy",EMode='Elastic', AlignBins=0)
            self.prop_man.log("Diagnose: finished convertUnits ",'information')
            background_int *= self.scale_factor
            diagnostics.normalise_background(background_int, white_integrals,
                                             diag_params.get('second_white',None))
            diag_params['background_int'] = background_int
            diag_params['sample_counts'] = total_counts
    # extract existing white mask if one is defined and provide it for
    # diagnose to use instead of constantly diagnosing the same vanadium
    white_mask = white.get_masking(1)
    # NOTE(review): 'sample_mask' is only bound inside the diag_sample branch above;
    # if diag_sample is None while the white beam already carries a mask, the
    # condition below would raise NameError -- TODO confirm intended usage.
    if white_mask is None or sample_mask is None:
        pass # have to run diagnostics
    else:
        #Sample mask and white masks are defined.
        #nothing to do then
        total_mask = sample_mask + white_mask
        return total_mask
    # Check how we should run diag
    diag_spectra_blocks = self.diag_spectra
    if white_mask is not None:
        diag_params['white_mask'] = white
    # keep white mask workspace for further usage
    if diag_spectra_blocks is None:
        # Do the whole lot at once
        white_masked_ws = diagnostics.diagnose(white_integrals, **diag_params)
        if white_masked_ws:
            white.add_masked_ws(white_masked_ws)
            DeleteWorkspace(white_masked_ws)
    else:
        # run diagnostics separately per declared spectra block (detector bank);
        # block boundaries are given as 1-based spectra numbers, hence the -1
        for bank in diag_spectra_blocks:
            diag_params['start_index'] = bank[0] - 1
            diag_params['end_index'] = bank[1] - 1
            white_masked_ws = diagnostics.diagnose(white_integrals, **diag_params)
            if white_masked_ws:
                white.add_masked_ws(white_masked_ws)
                DeleteWorkspace(white_masked_ws)
    if out_ws_name:
        if diag_sample is not None:
            diag_sample.add_masked_ws(white_integrals)
            mask = diag_sample.get_masking(1)
            diag_mask = CloneWorkspace(mask,OutputWorkspace=out_ws_name)
        else: # either WB was diagnosed or WB masks were applied to it
            # Extract the mask workspace
            diag_mask, _ = ExtractMask(InputWorkspace=white_integrals,OutputWorkspace=out_ws_name)
    else:
        diag_mask = None
    self.clean_up(diag_params, name_to_clean, white_integrals)
    return diag_mask
#-------------------------------------------------------------------------------
# Clean up unrequired workspaces
def clean_up(self, diag_params, name_to_clean, white_integrals):
    """Delete auxiliary workspaces created while running diagnostics.

    :param diag_params: diagnostics parameters dict; the presence of the
        'sample_counts' / 'second_white' keys indicates which temporaries exist
    :param name_to_clean: name of the normalised sample clone to remove
        (None/'' when no clone was created)
    :param white_integrals: white-beam integrals workspace; always removed
    """
    # fixed-name temporaries exist only when a sample run was diagnosed
    if 'sample_counts' in diag_params:
        for tmp_ws in ('background_int', 'total_counts'):
            DeleteWorkspace(Workspace=tmp_ws)
    if 'second_white' in diag_params:
        DeleteWorkspace(Workspace=diag_params['second_white'])
    if name_to_clean:
        DeleteWorkspace(name_to_clean)
        # the clone may have an associated monitors workspace
        monitors_name = name_to_clean + '_monitors'
        if monitors_name in mtd:
            DeleteWorkspace(monitors_name)
    DeleteWorkspace(Workspace=white_integrals)
#-------------------------------------------------------------------------------
#pylint: disable=too-many-arguments
#pylint: disable=too-many-branches
#pylint: disable=too-many-locals
#pylint: disable=W0621
# flake8: noqa
def convert_to_energy(self,wb_run=None,sample_run=None,ei_guess=None,rebin=None,map_file=None,
                      monovan_run=None,wb_for_monovan_run=None,**kwargs):
    """ One step conversion of run into workspace containing information about energy transfer

    :param wb_run:      white beam run (number, file or workspace) used for detector calibration
    :param sample_run:  sample run (number, file or workspace) to reduce
    :param ei_guess:    guess for the incident energy (stored as the incident_energy property)
    :param rebin:       energy binning parameters (stored as the energy_bins property)
    :param map_file:    detector mapping/grouping file
    :param monovan_run: monochromatic vanadium run for absolute units normalisation (optional)
    :param wb_for_monovan_run: white beam run used together with the monovan run (optional)
    :param kwargs:      any other reduction properties accepted by the property manager
    :return: the reduced workspace (a list of workspaces in multirep mode) when the caller
             assigns the result to a variable; None otherwise
    """
    # Support for old reduction interface:
    self.prop_man.set_input_parameters_ignore_nan\
        (wb_run=wb_run,sample_run=sample_run,incident_energy=ei_guess,energy_bins=rebin,
         map_file=map_file,monovan_run=monovan_run,wb_for_monovan_run=wb_for_monovan_run)
    #
    self.prop_man.set_input_parameters(**kwargs)
    # output workspace name: derived from the caller's left-hand-side variable, if any
    try:
        names = funcinspect.lhs_info('names')
        out_ws_name = names[0]
    #pylint: disable=bare-except
    except:
        out_ws_name = None
    prop_man = self.prop_man
    # check if reducer can find all non-run files necessary for the reduction
    # and verify some other properties which can be wrong before starting a
    # long run.
    prop_man.log("****************************************************************")
    prop_man.log("*** ISIS CONVERT TO ENERGY TRANSFER WORKFLOW STARTED **********")
    prop_man.validate_properties()
    prop_man.log("*** Loading or retrieving sample run: {0}".format(prop_man.sample_run))
    prop_man.log("****************************************************************")
    # before trying to process new results, let's remove from memory old results
    # if any present and they are not needed any more (user have not renamed them)
    self._clear_old_results()
    start_time = time.time()
    PropertyManager.sample_run.set_action_suffix('')
    sample_ws = PropertyManager.sample_run.get_workspace()
    # in auto-Ei mode, evaluate the incident energies from the monitor spectra
    ei_to_process_available = self.calc_incident_energies(PropertyManager, prop_man, sample_ws)
    # Update reduction properties which may change in the workspace but have
    # not been modified from input parameters.
    # E.g. detector number have changed
    old_changes = self.prop_man.getChangedProperties()
    all_changes = self.prop_man.update_defaults_from_instrument(sample_ws.getInstrument())
    workspace_defined_prop = all_changes.difference(old_changes)
    if len(workspace_defined_prop) > 0:
        prop_man.log("****************************************************************")
        prop_man.log('*** Sample run {0} properties change default reduction properties: '.
                     format(PropertyManager.sample_run.get_workspace().name()))
        prop_man.log_changed_values('notice',False,old_changes)
        prop_man.log("****************************************************************")
    # inform user on what parameters have changed from script or gui
    # if monovan present, check if abs_norm_ parameters are set
    self.prop_man.log_changed_values('notice')
    if not ei_to_process_available:
        # auto-Ei estimation failed -- nothing can be reduced
        prop_man.log("*** NO GUESS INCIDENT ENERGIES IDENTIFIED FOR THIS RUN *********")
        prop_man.log("*** NOTHING TO REDUCE ******************************************")
        prop_man.log("****************************************************************")
        return None
    masking = None
    masks_done = False
    if not prop_man.run_diagnostics:
        header = "*** Diagnostics including hard masking is skipped "
        masks_done = True
    #if Reducer.save_and_reuse_masks :
    # SAVE AND REUSE MASKS
    if self.spectra_masks:
        masks_done = True
    #--------------------------------------------------------------------------------------------------
    # Diagnostics here
    # -------------------------------------------------------------------------------------------------
    # diag the sample and detector vanadium. It will deal with hard mask only
    # if it is set that way
    if not masks_done:
        masking,header = self._run_diagnostics(prop_man)
    else:
        header = '*** Using stored mask file for workspace with {0} spectra and {1} masked spectra'
        masking = self.spectra_masks
    # estimate and report the number of failing detectors
    n_masked_spectra = get_failed_spectra_list_from_masks(masking,prop_man)
    if masking:
        n_spectra = masking.getNumberHistograms()
    else:
        n_spectra = 0
    prop_man.log(header.format(n_spectra,n_masked_spectra),'notice')
    #--------------------------------------------------------------------------------------------------
    # Remove empty background if one is defined
    #--------------------------------------------------------------------------------------------------
    self.remove_empty_background(masking)
    #--------------------------------------------------------------------------------------------------
    # now reduction
    #--------------------------------------------------------------------------------------------------
    # ISIS or GUI motor stuff
    psi = PropertyManager.psi.read_psi_from_workspace(sample_ws)
    if prop_man.motor_offset is not None and np.isnan(psi):
        #logs have a problem
        prop_man.log("*** Can not retrieve rotation value from sample environment logs: {0}.\n"
                     " Rotation angle remains undefined".format(prop_man.motor_log_names))
        PropertyManager.psi = None # Just in case
    else:
        # store psi in property not to retrieve it from workspace again
        prop_man.psi = psi
    #end
    #
    if self.monovan_run is not None:
        mono_van_cache_num = PropertyManager.monovan_run.run_number()
    else:
        mono_van_cache_num = None
    #Set or clear monovan run number to use in cash ID to return correct
    #cashed value of monovan integral
    PropertyManager.mono_correction_factor.set_cash_mono_run_number(mono_van_cache_num)
    mono_ws_base = None
    if PropertyManager.incident_energy.multirep_mode():
        self._multirep_mode = True
        ws_base = None
        if self.check_background:
            # find the count rate seen in the regions of the histograms defined
            # as the background regions, if the user defined such region.
            # In multirep mode this has to be done here, as workspace
            # will be cut in chunks and bg regions removed
            bkgd_range = self.bkgd_range
            self._find_or_build_bkgr_ws(PropertyManager.sample_run,bkgd_range[0],bkgd_range[1])
        # initialize list to store resulting workspaces to return
        result = []
    else:
        #pylint: disable=W0201
        self._multirep_mode = False
    #------------------------------------------------------------------------------------------
    # Main loop over incident energies
    #------------------------------------------------------------------------------------------
    # do not do enumerate if it generates all sequence at once
    # -- code below uses current energy state from PropertyManager.incident_energy
    AllEn = PropertyManager.incident_energy.getAllEiList()
    num_ei_cuts = len(AllEn)
    for ind,ei_guess in enumerate(AllEn):
        PropertyManager.incident_energy.set_current_ind(ind)
        cut_ind =ind + 1 # nice printing convention (1 of 1 rather them 0 of 1)
        #---------------
        if self._multirep_mode:
            # cut the TOF chunk corresponding to this incident energy out of the run
            tof_range = self.find_tof_range_for_multirep(ws_base)
            ws_base = PropertyManager.sample_run.chop_ws_part(ws_base,tof_range,self._do_early_rebinning,
                                                              cut_ind,num_ei_cuts)
            prop_man.log("*** Processing multirep chunk: #{0}/{1} for provisional energy: {2} meV".
                         format(cut_ind,num_ei_cuts,ei_guess),'notice')
            # do bleed corrections for chunk if necessary
            bleed_mask = self._do_bleed_corrections(PropertyManager.sample_run,cut_ind)
            if bleed_mask is not None:
                mask_ws_name = PropertyManager.sample_run.get_workspace().name()+'_bleed_mask'
                RenameWorkspace(bleed_mask,OutputWorkspace=mask_ws_name)
                self._old_runs_list.append(mask_ws_name)
        else:
            # single energy uses single workspace and all TOF are used
            tof_range = None
        # Do custom preprocessing if such operation is defined
        try:
            ws_to_preprocess = PropertyManager.sample_run.get_workspace()
            ws_to_preprocess = self.do_preprocessing(ws_to_preprocess)
            PropertyManager.sample_run.synchronize_ws(ws_to_preprocess)
        except AttributeError:
            # no do_preprocessing hook defined on this reducer -- nothing to do
            pass
        #---------------
        #
        #Run the conversion first on the sample
        deltaE_ws_sample = self.mono_sample(PropertyManager.sample_run,ei_guess,PropertyManager.wb_run,
                                            self.map_file,masking)
        #
        ei = (deltaE_ws_sample.getRun().getLogData("Ei").value)
        # PropertyManager.incident_energy.set_current(ei) let's not do it --
        # this makes subsequent calls to this method depend on previous calls
        prop_man.log("*** Incident energy found for sample run: {0} meV".format(ei),'notice')
        #
        # calculate absolute units integral and apply it to the workspace
        # or use previously cashed value
        cashed_mono_int = PropertyManager.mono_correction_factor.get_val_from_cash(prop_man)
        if mono_van_cache_num is not None or self.mono_correction_factor or cashed_mono_int:
            deltaE_ws_sample,mono_ws_base = self._do_abs_corrections(deltaE_ws_sample,cashed_mono_int,
                                                                     ei_guess,mono_ws_base,tof_range, cut_ind,num_ei_cuts)
        else:
            pass # no absolute units corrections
        # ensure that the sample_run name is intact with the sample workspace
        PropertyManager.sample_run.synchronize_ws(deltaE_ws_sample)
        if prop_man.correct_absorption_on is not None:
            abs_shape = prop_man.correct_absorption_on
            deltaE_ws_sample = abs_shape.correct_absorption(deltaE_ws_sample,prop_man.abs_corr_info)
        #
        #
        # Do custom post-processing if such operation is defined
        try:
            deltaE_ws_sample = self.do_postprocessing(deltaE_ws_sample)
            PropertyManager.sample_run.synchronize_ws(deltaE_ws_sample)
        except AttributeError:
            # no do_postprocessing hook defined on this reducer -- nothing to do
            pass
        self.save_results(deltaE_ws_sample)
        # prepare output workspace
        results_name = deltaE_ws_sample.name()
        if out_ws_name:
            if self._multirep_mode:
                result.append(deltaE_ws_sample)
            else:
                if results_name != out_ws_name: # This actually returns deltaE_ws_sample.name()
                    # to the state, defined in ADS. Intentionally skip renaming here.
                    result = PropertyManager.sample_run.synchronize_ws(deltaE_ws_sample)
                else:
                    result = deltaE_ws_sample
        else: # delete workspace if no output is requested
            result = None
        # register the produced workspace so the next reduction can clear it
        self._old_runs_list.append(results_name)
    #end_for
    #------------------------------------------------------------------------------------------
    # END Main loop over incident energies
    #------------------------------------------------------------------------------------------
    self.clean_up_convert_to_energy(start_time)
    return result
#------------------------------------------------------------------------------------------
def remove_empty_background(self,masking_ws=None):
"""Remove empty background from the workspaces, described by RunDescriptors, specified here
Inputs:
masking_ws -- if provided, mask empty background workspace before extracting it from
workspaces requested
"""
prop_man = self.prop_man
if prop_man.empty_bg_run is None: # Nothing to do. No empty background
return
# list of RunDescriptors to extract and process workspaces
rd_to_process = ['sample_run','wb_run','monovan_run','wb_for_monovan_run']
# list of Background properties
bg_to_process = ['empty_bg_run','empty_bg_run_for_wb','empty_bg_run_for_monovan','empty_bg_run_for_monoWb']
for rd_name,bg_rd_name in zip(rd_to_process,bg_to_process):
rd_prop = prop_man.get_prop_class(rd_name)
bg_prop = prop_man.get_prop_class(bg_rd_name)
empty_bg_ws = bg_prop.get_workspace()
if masking_ws is not None and empty_bg_ws is not None:
MaskDetectors(empty_bg_ws, MaskedWorkspace=masking_ws)
rd_prop.remove_empty_background(empty_bg_ws)
#------------------------------------------------------------------------------------------
# Handles cleanup of the convert_to_energy method
def clean_up_convert_to_energy(self, start_time):
    """Finalise convert_to_energy: drop per-run temporaries and report timing.

    :param start_time: time.time() value captured when the reduction started
    """
    # The background source workspace (present when multirep background removal
    # ran) must be cleared so the next reduction rebuilds it for its own data.
    if 'bkgr_ws_source' in mtd:
        DeleteWorkspace('bkgr_ws_source')
    # drop the combined mask; subsequent reductions re-run diagnostics
    self.spectra_masks = None
    elapsed = time.time() - start_time
    log = self.prop_man.log
    log("*** ISIS CONVERT TO ENERGY TRANSFER WORKFLOW FINISHED *********")
    log("*** Elapsed time : {0:>9.2f} sec *********".format(elapsed), 'notice')
    log("****************************************************************")
#------------------------------------------------------------------------------------------
# Check auto-ei mode and calculate incident energies if necessary
# Returns if there is a processible Ei
def calc_incident_energies(self, PropertyManager, prop_man, sample_ws):
    """Evaluate guess incident energies when running in automatic-Ei mode.

    :param PropertyManager: the PropertyManager class holding the run descriptors
    :param prop_man: the reducer's property manager instance (used for logging)
    :param sample_ws: the sample workspace whose monitors provide the Ei estimate
    :return: True when guess incident energies are available for processing,
             False when the auto-Ei calculation failed
    """
    # explicit incident energies were provided -- nothing to calculate
    if not PropertyManager.incident_energy.autoEi_mode():
        return True
    mon_ws = PropertyManager.sample_run.get_monitors_ws()
    ei_mon_spec = self.ei_mon_spectra
    # sum monitor spectra first when the instrument setup requests it
    if PropertyManager.ei_mon_spectra.need_to_sum_monitors(prop_man):
        ei_mon_spec, mon_ws = self.sum_monitors_spectra(mon_ws, ei_mon_spec)
        sample_ws.setMonitorWorkspace(mon_ws)
    try:
        PropertyManager.incident_energy.set_auto_Ei(mon_ws, prop_man, ei_mon_spec)
        return True
    except RuntimeError as er:
        prop_man.log('*** Error while calculating autoEi: {0}. See algorithm log for details.'.
                     format(str(er)))
        return False
#------------------------------------------------------------------------------------------
#pylint: disable=too-many-arguments
def _do_abs_corrections(self,deltaE_ws_sample,cashed_mono_int,ei_guess,
                        mono_ws_base,tof_range, cut_ind,num_ei_cuts):
    """Apply absolute-units normalization to the sample workspace.

    The correction factor comes from one of three sources, tried in order:
    1) a user-provided factor (self.mono_correction_factor),
    2) a factor cached from a previous run/cut (cashed_mono_int),
    3) a fresh calculation from the monochromatic vanadium run.

    :param deltaE_ws_sample: sample workspace converted to energy transfer
    :param cashed_mono_int: cached mono-vanadium correction factor or None
    :param ei_guess: incident energy estimate for the monovan run
    :param mono_ws_base: base monovan workspace (chopped per cut in multirep mode)
    :param tof_range: TOF range of the current cut (used in multirep mode)
    :param cut_ind: index of the current energy cut (multirep mode)
    :param num_ei_cuts: total number of energy cuts (multirep mode)
    :return: tuple (normalized sample workspace, monovan base workspace)
    """
    # do not remove background from vanadium (sample background is not
    # fit for that anyway) -- save the option and disable it for the duration
    #pylint: disable=E0203
    current_bkg_opt = self.check_background
    #pylint: disable=attribute-defined-outside-init
    self.check_background = False
    # what we want to do with absolute units:
    #pylint: disable=E0203
    if self.mono_correction_factor: # Correction provided. Just apply it
        deltaE_ws_sample = self.apply_absolute_normalization(deltaE_ws_sample,PropertyManager.monovan_run,
                                                             ei_guess,PropertyManager.wb_for_monovan_run,
                                                             ' provided ')
    elif cashed_mono_int:  # Correction cashed from previous run
        # temporarily install the cached factor so apply_absolute_normalization
        # picks it up, then clear it again
        #pylint: disable=attribute-defined-outside-init
        self.mono_correction_factor = cashed_mono_int
        deltaE_ws_sample = self.apply_absolute_normalization(deltaE_ws_sample,PropertyManager.monovan_run,
                                                             ei_guess,PropertyManager.wb_for_monovan_run,
                                                             ' -cached- ')
        #pylint: disable=attribute-defined-outside-init
        self.mono_correction_factor = None
    else: # Calculate corrections
        if self._multirep_mode:
            # only the TOF chunk belonging to the current energy cut takes
            # part in the correction calculation
            mono_ws_base = PropertyManager.monovan_run.chop_ws_part(mono_ws_base,tof_range,
                                                                    self._do_early_rebinning, cut_ind,num_ei_cuts)
            # its pointless to do bleed for monovan run
            #self._do_bleed_corrections(PropertyManager.monovan_run,cut_ind)
        deltaE_ws_sample = self.apply_absolute_normalization(deltaE_ws_sample,PropertyManager.monovan_run,
                                                             ei_guess,PropertyManager.wb_for_monovan_run,
                                                             'calculated')
        # monovan workspace has been corrupted in memory after
        # calculations and result placed in cash. Workspace unsuitable
        # for further calculations. Mark it cashed not to verify presence on consecutive runs
        # with the same monovan ws
        #pylint: disable=protected-access
        PropertyManager.monovan_run._in_cash = True
    # restore the background-check option saved above
    #pylint: disable=attribute-defined-outside-init
    self.check_background = current_bkg_opt
    return deltaE_ws_sample,mono_ws_base
def _run_diagnostics(self,prop_man):
    """Run the diagnostics procedure over the workspaces used by convert_to_energy.

    Builds the combined mask workspace (sample diagnostics, optionally merged
    with mono-vanadium diagnostics in absolute-units mode), stores it in
    self.spectra_masks and returns it together with a report header template.

    :param prop_man: the reduction properties instance used for options and logging
    :return: tuple (mask workspace, header format string with two {N:d} slots)
    """
    prop_man.log("======== Run diagnose for sample run ===========================",'notice')
    masking = self.diagnose(PropertyManager.wb_run,PropertyManager.mask_run,
                            second_white=None,print_diag_results=True)
    if prop_man.use_hard_mask_only:
        header = "*** Hard mask file applied to workspace with {0:d} spectra masked {1:d} spectra"
    else:
        header = "*** Diagnostics processed workspace with {0:d} spectra and masked {1:d} bad spectra"

    # diagnose absolute units: only needed when a monovan run is given and no
    # ready-made correction factor was provided
    monovan_diag_needed = (self.monovan_run is not None
                           and self.mono_correction_factor is None)
    if monovan_diag_needed:
        if self.use_sam_msk_on_monovan:
            prop_man.log(' Applying sample run mask to mono van')
        elif not self.use_hard_mask_only:
            # the monovan mask is merged into the sample mask; masking2 is a
            # different workspace here (should be a better solution for that)
            prop_man.log("======== Run diagnose for monochromatic vanadium run ===========",'notice')
            masking2 = self.diagnose(PropertyManager.wb_for_monovan_run,PropertyManager.monovan_run,
                                     second_white=None,print_diag_results=True)
            masking += masking2
            DeleteWorkspace(masking2)

    # Very important statement propagating masks for further usage in
    # convert_to_energy. This property is also directly accessible from GUI.
    self.spectra_masks = masking
    # save mask if it does not exist and has been already loaded
    #if Reducer.save_and_reuse_masks and not masks_done:
    #    SaveMask(InputWorkspace=masking,OutputFile=mask_file_name,GroupedDetectors=True)
    return masking,header
def do_white(self, run, spectra_masks=None, map_file=None):
    """
    Build the workspace in which each spectrum contains the corresponding
    white beam integral (a single value).

    These integrals serve as an estimate for detector efficiency over a wide
    range of energies (rather than the detector electronics' efficiency, as
    the counters differ strongly in efficiency) and are used to remove the
    influence of this efficiency on the different detectors.
    """
    result = self._get_wb_inegrals(run)
    # Masks may change between runs, so masking and grouping happen here
    result = self.remap(result, spectra_masks, map_file)
    # Apply the white beam scale factor in-place (preserves the workspace name)
    result *= self.wb_scale_factor
    return result
def _do_bleed_corrections(self,sample_run,nchunk):
    """Calculate TOF-chunk specific bleed corrections, necessary in multirep mode.

    :param sample_run: run descriptor of the sample run to test
    :param nchunk: index of the current TOF chunk (for logging only)
    :return: the bleed mask workspace if any pixels failed, else None
             (also None when the bleed test is disabled)
    """
    prop_man = self.prop_man
    if not prop_man.diag_bleed_test:
        return None

    chunk_masks, failures = diagnostics.do_bleed_test(sample_run, prop_man.bleed_maxrate, prop_man.bleed_pixels)
    if failures > 0:
        # propagate the failed pixels onto the sample workspace
        diagnostics.add_masking(sample_run.get_workspace(),chunk_masks)
        bleed_mask = chunk_masks
    else:
        DeleteWorkspace(chunk_masks)
        bleed_mask = None
    prop_man.log("*** Bleeding test for chunk #{0} masked {1} pixels".format(nchunk,failures),'notice')
    return bleed_mask
#pylint: disable=too-many-arguments
def mono_sample(self, mono_run, ei_guess, white_run=None, map_file=None,
                spectra_masks=None, result_name=None, Tzero=None):
    """Convert a mono-chromatic sample run to DeltaE.

    :param mono_run: the monochromatic run (number, workspace or descriptor)
    :param ei_guess: estimate of the incident energy
    :param white_run: optional white beam run used for detector efficiency
    :param map_file: optional grouping map file
    :param spectra_masks: optional mask workspace
    :param result_name: unused, kept for interface compatibility
    :param Tzero: optional time-zero correction
    :return: the sample workspace converted to energy transfer
    """
    mono_descr = self.get_run_descriptor(mono_run)
    white_descr = self.get_run_descriptor(white_run) if white_run else white_run
    converted = self._do_mono(mono_descr, ei_guess,
                              white_descr, map_file, spectra_masks, Tzero)
    # at this stage we would never need monitors for this workspace,
    # if they were actually there
    mono_descr.clear_monitors()
    return converted
#-------------------------------------------------------------------------------
def sum_monitors_spectra(self,monitor_ws,ei_mon_spectra):
    """Sum monitors spectra for all spectra, specified in the spectra list(s)
    and create a monitor workspace containing only the summed spectra,
    organized according to the ei_mon_spectra tuple.

    Any other monitors in use (per prop_man.get_used_monitors_list) are kept
    and appended to the summed workspace. The set of combined spectra IDs is
    recorded in the 'CombinedSpectraIDList' sample log so repeated calls on
    an already-summed workspace are no-ops.

    :param monitor_ws: the monitor workspace to process (replaced in the ADS)
    :param ei_mon_spectra: 2-tuple of spectrum numbers or lists of spectrum
                           numbers to be summed into the two Ei monitors
    :return: tuple ((spec_id1, spec_id2), monitor_ws) -- the spectrum numbers
             of the two summed monitors in the new workspace and the workspace
    """
    existing_list = process_prop_list(monitor_ws,"CombinedSpectraIDList")
    monitor_ws_name = monitor_ws.name()
    # only sum if this workspace has not been processed already
    if len(existing_list) == 0:
        spectra_list1=ei_mon_spectra[0]
        spectra_list2=ei_mon_spectra[1]
        # normalize scalar spectrum numbers to single-element lists
        if not isinstance(spectra_list1,list):
            spectra_list1 = [spectra_list1]
        spec_id1 = self._process_spectra_list(monitor_ws,spectra_list1,'spectr_ws1')
        if not isinstance(spectra_list2,list):
            spectra_list2 = [spectra_list2]
        spec_id2 = self._process_spectra_list(monitor_ws,spectra_list2,'spectr_ws2')
        # Are other monitors necessary?  Collect monitors in use that are not
        # part of either summed group so they can be carried over.
        mon_list = self.prop_man.get_used_monitors_list()
        spectra_needed=[]
        monitors_left=[]
        for mon_id in mon_list:
            if not(mon_id in spectra_list1 or mon_id in spectra_list2):
                wsInd = monitor_ws.getIndexFromSpectrumNumber(int(mon_id))
                monitors_left.append(int(mon_id))
                spectra_needed.append(int(wsInd))
        n_other_mon = len(spectra_needed)
        if n_other_mon > 0:
            ExtractSpectra(InputWorkspace=monitor_ws,OutputWorkspace='_OtherMon',
                           WorkspaceIndexList=spectra_needed)
        else:
            pass
        # Deal with summed monitors: replace the original monitor workspace
        # with the conjoined summed spectra (order matters -- the original
        # must be deleted before the rename can reuse its name)
        DeleteWorkspace(monitor_ws_name)
        ConjoinWorkspaces(InputWorkspace1='spectr_ws1',InputWorkspace2='spectr_ws2')
        RenameWorkspace(InputWorkspace='spectr_ws1',OutputWorkspace=monitor_ws_name)
        if '_OtherMon' in mtd:
            # append the carried-over monitors extracted above
            ConjoinWorkspaces(InputWorkspace1=monitor_ws_name,InputWorkspace2='_OtherMon')
        else:
            pass
        monitor_ws = mtd[monitor_ws_name]
        # record which spectra were combined so this is not redone
        AddSampleLog(monitor_ws,LogName='CombinedSpectraIDList',
                     LogText=str(monitors_left+spectra_list1+spectra_list2),LogType='String')
    else:
        pass
    # Weird operation. It looks like the spectra numbers obtained from
    # AppendSpectra operation depend on instrument.
    # Looks like a bug in AppendSpectra, so re-read the actual numbers here.
    spec_id1 = monitor_ws.getSpectrum(0).getSpectrumNo()
    spec_id2 = monitor_ws.getSpectrum(1).getSpectrumNo()
    return (spec_id1,spec_id2),monitor_ws
#
def _process_spectra_list(self,workspace,spectra_list,target_ws_name='SpectraWS'):
    """Move all detectors of the listed spectra onto the position of the first
    one and sum the spectra into a single-spectrum workspace.

    :param workspace: source workspace containing the spectra
    :param spectra_list: list of spectrum numbers to combine
    :param target_ws_name: name of the output single-spectrum workspace
    :return: the spectrum number of the resulting summed spectrum
    """
    ref_pos = None
    ws_indices = []
    for spec_num in spectra_list:
        ws_index = workspace.getIndexFromSpectrumNumber(spec_num)
        if ref_pos is None:
            # first detector defines the common position for all the others
            ref_pos = workspace.getDetector(ws_index).getPos()
        else:
            spectrum = workspace.getSpectrum(ws_index)
            for det_id in spectrum.getDetectorIDs():
                MoveInstrumentComponent(Workspace=workspace,ComponentName='Detector',
                                        DetectorID=det_id,X=ref_pos.getX(),Y=ref_pos.getY(),
                                        Z=ref_pos.getZ(),RelativePosition=False)
        ws_indices.append(ws_index)

    if len(spectra_list) == 1:
        ExtractSingleSpectrum(InputWorkspace=workspace,OutputWorkspace=target_ws_name,
                              WorkspaceIndex=ws_indices[0])
    else:
        SumSpectra(InputWorkspace=workspace,OutputWorkspace=target_ws_name,
                   ListOfWorkspaceIndices=ws_indices)
    summed = mtd[target_ws_name]
    return summed.getSpectrum(0).getSpectrumNo()
#-------------------------------------------------------------------------------
def get_ei(self, data_run, ei_guess):
    """Calculate the incident energy of the neutrons and the time of the
    peak in the monitor spectrum.

    The X data is corrected to set the first monitor peak at t=0 by subtracting
    t_mon + t_det_delay
    where the detector delay time is retrieved from the instrument.
    The position of the "source" component is also moved to match the position
    of the first monitor.

    :param data_run: run (number, workspace or descriptor) to process
    :param ei_guess: estimate of the incident energy, passed to GetEi
    :return: tuple (ei, mon1_peak) -- the incident energy found and the TOF
             position of the first monitor peak
    :raises RuntimeError: if the run has no monitors workspace
    """
    data_run = self.get_run_descriptor(data_run)
    fix_ei = self.fix_ei
    ei_mon_spectra = self.ei_mon_spectra
    data_ws = data_run.get_workspace()
    monitor_ws = data_run.get_monitors_ws()
    if monitor_ws is None:
        raise RuntimeError("Can not find monitors workspace for workspace {0}, run N{1}".
                           format(data_ws.name(),data_ws.getRunNumber()))
    separate_monitors = data_run.is_monws_separate()
    data_run.set_action_suffix('_shifted')
    # sum monitor spectra if this is requested
    if PropertyManager.ei_mon_spectra.need_to_sum_monitors(self.prop_man):
        ei_mon_spectra,monitor_ws = self.sum_monitors_spectra(monitor_ws,ei_mon_spectra)
        data_ws.setMonitorWorkspace(monitor_ws)
    # Calculate the incident energy
    #Returns: ei,mon1_peak,mon1_index,tzero
    ei,mon1_peak,_,_ = \
        GetEi(InputWorkspace=monitor_ws, Monitor1Spec=ei_mon_spectra[0],
              Monitor2Spec=ei_mon_spectra[1],
              EnergyEstimate=ei_guess,FixEi=fix_ei)
    # record the found energy on the detector workspace as an instrument parameter
    SetInstrumentParameter(data_ws,ParameterName='EFixed',ParameterType='Number', Value='{0}'.format(ei))
    # Store found incident energy in the class itself
    if self.prop_man.normalise_method == 'monitor-2' and not separate_monitors:
        # monitor-2 normalization ranges have to be identified before the
        # instrument is shifted in case it is shifted to this monitor (usual
        # case)
        #Find TOF range, correspondent to incident energy monitor peak
        energy_rage = self.mon2_norm_energy_range
        #pylint: disable=attribute-defined-outside-init
        self._mon2_norm_time_range = self.get_TOF_for_energies(monitor_ws,energy_rage,
                                                               [self.mon2_norm_spec],None,self._debug_mode)
    #end
    if separate_monitors:
        # copy incident energy obtained on monitor workspace to detectors
        # workspace
        AddSampleLog(Workspace=data_ws,LogName='Ei',LogText='{0:.10f}'.format(ei),LogType='Number')
    resultws_name = data_ws.name()
    # Adjust the TOF such that the first monitor peak is at t=0
    ScaleX(InputWorkspace=data_ws,OutputWorkspace=resultws_name,Operation="Add",Factor=-mon1_peak,
           InstrumentParameter="DelayTime",Combine=True)
    # shift to monitor used to calculate energy transfer
    spec_num = monitor_ws.getIndexFromSpectrumNumber(ei_mon_spectra[0])
    mon1_det = monitor_ws.getDetector(spec_num)
    mon1_pos = mon1_det.getPos()
    src_name = data_ws.getInstrument().getSource().getName()
    # move the source onto the first monitor so elastic TOF is measured from it
    MoveInstrumentComponent(Workspace=resultws_name,ComponentName= src_name, X=mon1_pos.getX(),
                            Y=mon1_pos.getY(), Z=mon1_pos.getZ(), RelativePosition=False)
    #
    data_run.synchronize_ws(mtd[resultws_name])
    return ei, mon1_peak
#-------------------------------------------------------------------------------
def remap(self, result_ws, spec_masks, map_file):
    """
    Mask and group detectors based on input parameters.

    :param result_ws: workspace to process (modified in place in the ADS)
    :param spec_masks: optional mask workspace applied via MaskDetectors
    :param map_file: optional grouping map file applied via GroupDetectors
    :return: the processed workspace retrieved from the ADS
    """
    target = result_ws.name()
    if spec_masks is not None:
        MaskDetectors(Workspace=target, MaskedWorkspace=spec_masks)
    if map_file is not None:
        GroupDetectors(InputWorkspace=target,OutputWorkspace=target,
                       MapFile=map_file, KeepUngroupedSpectra=0, Behaviour='Average')
    return mtd[target]
#-------------------------------------------------------------------------------
def normalise(self, run, method, range_offset=0.0,external_monitors_ws=None):
""" Apply normalization using specified source
"""
run = self.get_run_descriptor(run)
data_ws = run.get_workspace()
old_ws_name = data_ws.name()
result_name = run.set_action_suffix('_normalized')
# Make sure we don't call this twice
done_log = "DirectInelasticReductionNormalisedBy"
if done_log in data_ws.getRun() or method is None:
run.synchronize_ws(data_ws)
output = mtd[result_name]
return output
method = method.lower()
for case in common.switch(method):
if case('monitor-1'):
method,old_ws_name = self._normalize_to_monitor1(run,old_ws_name, range_offset,external_monitors_ws)
break
if case('monitor-2'):
method,old_ws_name = self._normalize_to_monitor2(run,old_ws_name, range_offset,external_monitors_ws)
break
if case('current'):
NormaliseByCurrent(InputWorkspace=old_ws_name,OutputWorkspace=old_ws_name)
# NormalizationFactor log has been added by the algorithm themselves.
break
if case(): # default
raise RuntimeError("""Normalization method {0} not found.
It must be one of monitor-1, monitor-2, current, or None""".
format(method))
#endCase
# Add a log to the workspace to say that the normalization has been
# done
output = mtd[old_ws_name]