diff --git a/Framework/DataHandling/src/CompressEvents.cpp b/Framework/DataHandling/src/CompressEvents.cpp
index 777d58da6fd8..55b7257bddc4 100644
--- a/Framework/DataHandling/src/CompressEvents.cpp
+++ b/Framework/DataHandling/src/CompressEvents.cpp
@@ -12,6 +12,8 @@
 #include "MantidKernel/BoundedValidator.h"
 #include "MantidKernel/DateAndTimeHelpers.h"
 #include "MantidKernel/DateTimeValidator.h"
+#include "MantidKernel/EnumeratedString.h"
+#include "MantidKernel/ListValidator.h"

 #include "tbb/parallel_for.h"

@@ -26,6 +28,12 @@ using namespace Kernel;
 using namespace API;
 using namespace DataObjects;

+namespace {
+const std::vector<std::string> binningModeNames{"Default", "Linear", "Logarithmic"};
+enum class BinningMode { DEFAULT, LINEAR, LOGARITHMIC, enum_count };
+typedef Mantid::Kernel::EnumeratedString<BinningMode, &binningModeNames> BINMODE;
+} // namespace
+
 void CompressEvents::init() {
   declareProperty(std::make_unique<WorkspaceProperty<EventWorkspace>>("InputWorkspace", "", Direction::Input),
                   "The name of the EventWorkspace on which to perform the algorithm");
@@ -36,10 +44,12 @@ void CompressEvents::init() {
   // Tolerance must be >= 0.0
   auto mustBePositive = std::make_shared<BoundedValidator<double>>();
   mustBePositive->setLower(0.0);
-  declareProperty(std::make_unique<PropertyWithValue<double>>("Tolerance", 1e-5, mustBePositive, Direction::Input),
+  declareProperty(std::make_unique<PropertyWithValue<double>>("Tolerance", 1e-5, Direction::Input),
                   "The tolerance on each event's X value (normally TOF, but may be a "
                   "different unit if you have used ConvertUnits).\n"
-                  "Any events within Tolerance will be summed into a single event.");
+                  "Any events within Tolerance will be summed into a single event. When compressing, a positive "
+                  "tolerance is linear, a negative tolerance is logarithmic, and zero indicates that the "
+                  "time-of-flight must be identical to compress.");

   declareProperty(
       std::make_unique<PropertyWithValue<double>>("WallClockTolerance", EMPTY_DBL(), mustBePositive, Direction::Input),
@@ -54,15 +64,28 @@ void CompressEvents::init() {
                   "starting filtering. Ignored if WallClockTolerance is not specified. "
                   "Default is start of run",
                   Direction::Input);
+
+  declareProperty("BinningMode", binningModeNames[size_t(BinningMode::DEFAULT)],
+                  std::make_shared<StringListValidator>(binningModeNames),
+                  "Binning behavior can be specified in the usual way, through the sign of the tolerance and other "
+                  "properties ('Default'), or it can be set to one of the allowed binning modes, which overrides "
+                  "any other specification or default behavior.");
 }

 void CompressEvents::exec() {
   // Get the input workspace
   EventWorkspace_sptr inputWS = getProperty("InputWorkspace");
   EventWorkspace_sptr outputWS = getProperty("OutputWorkspace");
-  const double toleranceTof = getProperty("Tolerance");
+  double toleranceTof = getProperty("Tolerance");
   const double toleranceWallClock = getProperty("WallClockTolerance");
   const bool compressFat = !isEmpty(toleranceWallClock);
+
+  BINMODE mode = getPropertyValue("BinningMode");
+  if (mode == BinningMode::LINEAR)
+    toleranceTof = std::fabs(toleranceTof);
+  else if (mode == BinningMode::LOGARITHMIC)
+    toleranceTof = -1. * std::fabs(toleranceTof);
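+  // from here on the sign of toleranceTof encodes the binning mode understood by the
+  // EventList compression helpers: positive means linear grouping, negative means logarithmic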
+
   Types::Core::DateAndTime startTime;
   if (compressFat) {
diff --git a/Framework/DataHandling/test/CompressEventsTest.h b/Framework/DataHandling/test/CompressEventsTest.h
index 27e2cc2f4285..3b64483b9858 100644
--- a/Framework/DataHandling/test/CompressEventsTest.h
+++ b/Framework/DataHandling/test/CompressEventsTest.h
@@ -20,6 +20,7 @@ using namespace Mantid::DataHandling;
 using namespace Mantid::API;
 using namespace Mantid::Geometry;
 using namespace Mantid::DataObjects;
+using namespace Mantid::Types::Core;

 class CompressEventsTest : public CxxTest::TestSuite {
 public:
@@ -32,7 +33,6 @@ class CompressEventsTest : public CxxTest::TestSuite {
   void test_InvalidInputs() {
     CompressEvents alg;
     TS_ASSERT_THROWS_NOTHING(alg.initialize());
-    TS_ASSERT_THROWS(alg.setPropertyValue("Tolerance", "-1.0"), const std::invalid_argument &);
     TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("Tolerance", "0.0"));
   }

@@ -128,4 +128,75 @@
   void test_InPlace_ZeroTolerance_WithPulseTime() { doTest("CompressEvents_input", "CompressEvents_input", 0.0, 50, .001); }
+
+  void doLogarithmicTest(const std::string &binningMode, const double tolerance, double wallClockTolerance = 0.) {
+    EventWorkspace_sptr input, output;
+    EventType eventType = WEIGHTED_NOTIME;
+    if (wallClockTolerance > 0.)
+      eventType = WEIGHTED;
+
+    /** Create event workspace with:
+     * 1 pixel (or another number)
+     * 64 histogrammed bins from 0.0 in steps of 1.0
+     * 128 events; two in each bin, at time 1.0, 2.0, etc.
+     * PulseTime = 1 second, 2 seconds, etc.
+     */
+    input = WorkspaceCreationHelper::createEventWorkspace(1, 64, 64, 0, 1, 2);
+    AnalysisDataService::Instance().addOrReplace("CompressEvents_input", input);
+
+    TS_ASSERT_EQUALS(input->getNumberEvents(), 128);
+    const double inputIntegral = input->getSpectrum(0).integrate(0., 100., true);
+
+    CompressEvents alg;
+    alg.initialize();
+    alg.setPropertyValue("InputWorkspace", "CompressEvents_input");
+    alg.setPropertyValue("OutputWorkspace", "CompressEvents_output");
+    alg.setProperty("Tolerance", tolerance);
+    alg.setPropertyValue("BinningMode", binningMode);
+    if (wallClockTolerance > 0.) {
+      alg.setProperty("WallClockTolerance", wallClockTolerance);
+      alg.setProperty("StartTime", "2010-01-01T00:00:00"); // copied from createEventWorkspace
+    }
+
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+    TS_ASSERT(alg.isExecuted());
+
+    output = AnalysisDataService::Instance().retrieveWS<EventWorkspace>("CompressEvents_output");
+
+    TS_ASSERT_EQUALS(output->getNumberEvents(), 7);
+    TS_ASSERT_EQUALS(output->getEventType(), eventType);
+    TS_ASSERT_DELTA(output->getSpectrum(0).integrate(0., 100., true), inputIntegral, 1.e-6);
+
+    EventList el = output->getSpectrum(0);
+    TS_ASSERT_DELTA(el.getEvent(0).weight(), 2.0, 1e-6);
+    TS_ASSERT_DELTA(el.getEvent(0).errorSquared(), 2.0, 1e-6);
+    TS_ASSERT_DELTA(el.getEvent(0).tof(), 0.5, 1e-6);
+    for (int i = 1; i < 7; i++) {
+      TS_ASSERT_DELTA(el.getEvent(i).weight(), 1.0 * pow(2, i), 1e-6);
+      TS_ASSERT_DELTA(el.getEvent(i).errorSquared(), 1.0 * pow(2, i), 1e-6);
+      TS_ASSERT_DELTA(el.getEvent(i).tof(), 0.75 * pow(2, i), 1e-6);
+    }
+
+    if (wallClockTolerance > 0.) {
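+      // pulse times of the compressed events are the weighted averages of each group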
+      const auto startTime = DateAndTime("2010-01-01T00:00:00");
+      TS_ASSERT_EQUALS(el.getEvent(0).pulseTime(), startTime);
+      TS_ASSERT_EQUALS(el.getEvent(1).pulseTime(), startTime + 1.0);
+      TS_ASSERT_EQUALS(el.getEvent(2).pulseTime(), startTime + 2.5);
+      TS_ASSERT_EQUALS(el.getEvent(3).pulseTime(), startTime + 5.5);
+      TS_ASSERT_EQUALS(el.getEvent(4).pulseTime(), startTime + 11.5);
+      TS_ASSERT_EQUALS(el.getEvent(5).pulseTime(), startTime + 23.5);
+      TS_ASSERT_EQUALS(el.getEvent(6).pulseTime(), startTime + 47.5);
+    } else {
+      const auto timeZero = DateAndTime{0};
+      for (int i = 0; i < 7; i++) {
+        TS_ASSERT_EQUALS(el.getEvent(i).pulseTime(), timeZero);
+      }
+    }
+  }
+
+  void test_Logarithmic_binning() { doLogarithmicTest("Logarithmic", 1.); }
+  void test_Logarithmic_binning_default() { doLogarithmicTest("Default", -1.); }
+  void test_Logarithmic_binning_WithPulseTime() { doLogarithmicTest("Logarithmic", 1., 64); }
+  void test_Logarithmic_binning_default_WithPulseTime() { doLogarithmicTest("Default", -1., 64); }
 };
diff --git a/Framework/DataObjects/src/EventList.cpp b/Framework/DataObjects/src/EventList.cpp
index c9b7f30e1d9d..b6dbefee7d16 100644
--- a/Framework/DataObjects/src/EventList.cpp
+++ b/Framework/DataObjects/src/EventList.cpp
@@ -1450,7 +1450,7 @@ inline double calcNorm(const double errorSquared) {
  * @param events :: input event list.
  * @param out :: output WeightedEventNoTime vector.
  * @param tolerance :: how close do two event's TOF have to be to be considered
- *the same.
+ *the same. Negative implies log grouping.
  */
 template <class T>
@@ -1462,7 +1462,7 @@ inline void EventList::compressEventsHelper(const std::vector<T> &events, std::v
   out.reserve(events.size() / 20);

   // The last TOF to which we are comparing.
-  double lastTof = std::numeric_limits<double>::lowest();
+  double lastTof = events.front().m_tof;
   // For getting an accurate average TOF
   double totalTof = 0;
   int num = 0;
@@ -1471,8 +1471,36 @@ inline void EventList::compressEventsHelper(const std::vector<T> &events, std::v
   double errorSquared = 0;
   double normalization = 0.;

+  double bin_end = lastTof;
+  std::function<bool(const double, const double)> compareTof;
+  std::function<double(const double, double)> next_bin;
+
+  if (tolerance < 0) { // log
+    if (lastTof < 0)
+      throw std::runtime_error("compressEvents with log binning doesn't work with negative TOF");
+
+    if (lastTof == 0)
+      bin_end = fabs(tolerance);
+
+    // for log we do "less than" so that it matches the log binning of the Rebin algorithm
+    compareTof = [](const double lhs, const double rhs) { return lhs < rhs; };
+    next_bin = [tolerance](const double lastTof, double bin_end) {
+      // advance the bin_end until we find the one that this next event falls into
+      while (lastTof >= bin_end)
+        bin_end = bin_end * (1 - tolerance);
+      return bin_end;
+    };
+  } else { // linear
+    // for linear we do "less than or equals" because that is how it was originally implemented
+    compareTof = [](const double lhs, const double rhs) { return lhs <= rhs; };
+    next_bin = [tolerance](const double lastTof, double) { return lastTof + tolerance; };
+  }
+
+  // get first bin_end
+  bin_end = next_bin(lastTof, bin_end);
+
   for (auto it = events.cbegin(); it != events.cend(); it++) {
-    if ((it->m_tof - lastTof) <= tolerance) {
+    if (compareTof(it->m_tof, bin_end)) {
       // Carry the error and weight
       weight += it->weight();
       errorSquared += it->errorSquared();
@@ -1499,6 +1527,8 @@ inline void EventList::compressEventsHelper(const std::vector<T> &events, std::v
       weight = it->weight();
       errorSquared = it->errorSquared();
       lastTof = it->m_tof;
+
+      bin_end = next_bin(lastTof, bin_end);
     }
   }
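For reference, the log-mode next_bin above walks the same edge progression as Rebin's logarithmic binning, where each edge grows by a factor of (1 - tolerance). A minimal standalone sketch of that progression, assuming xmin > 0 and a negative tolerance (the helper name is illustrative, not part of the patch):

    #include <vector>

    // Rebin-style logarithmic bin edges: each edge grows by (1 - tolerance);
    // e.g. tolerance = -1 doubles the bin width at every step.
    std::vector<double> logEdges(double xmin, double xmax, double tolerance) {
      const double factor = 1. - tolerance; // tolerance < 0, so factor > 1
      std::vector<double> edges{xmin};
      while (edges.back() < xmax)
        edges.push_back(edges.back() * factor);
      return edges;
    }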
@@ -1528,7 +1558,7 @@ inline void EventList::compressFatEventsHelper(const std::vector<T> &events, std
   out.reserve(events.size() / 20);

   // The last TOF to which we are comparing.
-  double lastTof = std::numeric_limits<double>::lowest();
+  double lastTof = events.front().m_tof;

   // For getting an accurate average TOF
   double totalTof = 0;
@@ -1558,10 +1588,46 @@ inline void EventList::compressFatEventsHelper(const std::vector<T> &events, std
   // bin if the pulses are histogrammed
   int64_t lastPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta;
+
+  double bin_end = lastTof;
+  double tof_min{0};
+  std::function<bool(const double, const double)> compareTof;
+  std::function<double(const double, double)> next_bin;
+
+  if (tolerance < 0) { // log
+    // for log we do "less than" so that it matches the log binning of the Rebin algorithm
+    compareTof = [](const double lhs, const double rhs) { return lhs < rhs; };
+    next_bin = [tolerance](const double lastTof, double bin_end) {
+      // advance the bin_end until we find the one that this next event falls into
+      while (lastTof >= bin_end)
+        bin_end = bin_end * (1 - tolerance);
+      return bin_end;
+    };
+
+    // get the minimum TOF so that binning is consistent across all pulses
+    const auto event_min = std::min_element(
+        events.cbegin(), events.cend(), [](const auto &left, const auto &right) { return left.tof() < right.tof(); });
+    bin_end = tof_min = event_min->tof();
+
+    if (tof_min < 0)
+      throw std::runtime_error("compressEvents with log binning doesn't work with negative TOF");
+
+    if (tof_min == 0)
+      bin_end = fabs(tolerance);
+
+  } else { // linear
+    // for linear we do "less than or equals" because that is how it was originally implemented
+    compareTof = [](const double lhs, const double rhs) { return lhs <= rhs; };
+    next_bin = [tolerance](const double lastTof, double) { return lastTof + tolerance; };
+  }
+
+  // get first bin_end
+  bin_end = next_bin(lastTof, bin_end);
+
   // loop through events and accumulate weight
   for (; it != events.cend(); ++it) {
     const int64_t eventPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta;
-    if ((eventPulseBin <= lastPulseBin) && (std::fabs(it->m_tof - lastTof) <= tolerance)) {
+    if ((eventPulseBin <= lastPulseBin) && compareTof(it->m_tof, bin_end)) {
       // Carry the error and weight
       weight += it->weight();
       errorSquared += it->errorSquared();
@@ -1585,6 +1651,10 @@ inline void EventList::compressFatEventsHelper(const std::vector<T> &events, std
                           errorSquared);
       }
     }
+    if (tolerance < 0 && eventPulseBin != lastPulseBin)
+      // reset the bin_end for the new pulse bin
+      bin_end = tof_min;
+
     // Start a new combined object
     double norm = calcNorm(it->errorSquared());
     totalTof = it->m_tof * norm;
@@ -1597,6 +1667,8 @@ inline void EventList::compressFatEventsHelper(const std::vector<T> &events, std
     pulsetimes.emplace_back(it->m_pulsetime);
     pulsetimeWeights.clear();
     pulsetimeWeights.emplace_back(norm);
+
+    bin_end = next_bin(lastTof, bin_end);
   }
 }
diff --git a/Framework/DataObjects/test/EventListTest.h b/Framework/DataObjects/test/EventListTest.h
index bc20e59bd6fd..427840ef8241 100644
--- a/Framework/DataObjects/test/EventListTest.h
+++ b/Framework/DataObjects/test/EventListTest.h
@@ -2159,6 +2159,253 @@ class EventListTest : public CxxTest::TestSuite {
     // last value
   }

+  void test_compressEvents_log() {
+    this->fake_uniform_data(10000.);
+
+    // First let's compare histogramming with compression versus without.
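+    // (a negative step in the rebin parameters below requests logarithmic bins,
+    //  matching the negative compression tolerance)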
+    // This may only work exactly when the smallest tof is equal to the minimum rebin parameter,
+    // in this case 100
+    MantidVec X, expected_Y, expected_E, Y, E;
+    VectorHelper::createAxisFromRebinParams({100., -1., 819200}, X, true);
+
+    // get expected results
+    el.generateHistogram(X, expected_Y, expected_E);
+
+    // do compress events with log binning then histogram to compare to un-compressed
+    EventList el_output;
+    TS_ASSERT_THROWS_NOTHING(el.compressEvents(-1, &el_output));
+    el_output.generateHistogram(X, Y, E);
+
+    for (size_t i = 0; i < Y.size(); i++) {
+      TS_ASSERT_EQUALS(expected_Y[i], Y[i]);
+      TS_ASSERT_EQUALS(expected_E[i], E[i]);
+    }
+
+    // now check individual events
+    TS_ASSERT_EQUALS(el.getNumberEvents(), 9999900);
+    TS_ASSERT_EQUALS(el_output.getNumberEvents(), 17);
+
+    // event weights should double for each one, 100, 200, 400, 800...
+    // event tofs should double, it will be roughly 150, 300, 600, 1200...
+
+    // don't check the last event as its bin will be partially filled
+    for (int i = 0; i < 16; i++) {
+      TS_ASSERT_EQUALS(el_output.getEvent(i).weight(), 100 * pow(2, i))
+      TS_ASSERT_EQUALS(el_output.getEvent(i).errorSquared(), 100 * pow(2, i))
+      TS_ASSERT_DELTA(el_output.getEvent(i).tof(), 150 * pow(2, i), 0.5)
+    }
+  }
+
+  void test_compressEvents_log2() {
+    // Check the behavior for very sparse events:
+    // add four events, one of which is repeated;
+    // this should result in 3 events, with the second having weight 2
+    el = EventList();
+    el += TofEvent(100, 0);
+    el += TofEvent(1000, 0);
+    el += TofEvent(1000, 0);
+    el += TofEvent(100000, 0);
+
+    // do compress events with log binning
+    EventList el_output;
+    TS_ASSERT_THROWS_NOTHING(el.compressEvents(-0.01, &el_output))
+
+    // now check individual events
+    TS_ASSERT_EQUALS(el_output.getNumberEvents(), 3)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(0).weight(), 1)
+    TS_ASSERT_EQUALS(el_output.getEvent(0).errorSquared(), 1)
+    TS_ASSERT_DELTA(el_output.getEvent(0).tof(), 100, 1e-5)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(1).weight(), 2)
+    TS_ASSERT_EQUALS(el_output.getEvent(1).errorSquared(), 2)
+    TS_ASSERT_DELTA(el_output.getEvent(1).tof(), 1000, 1e-5)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(2).weight(), 1)
+    TS_ASSERT_EQUALS(el_output.getEvent(2).errorSquared(), 1)
+    TS_ASSERT_DELTA(el_output.getEvent(2).tof(), 100000, 1e-5)
+  }
+
+  void test_compressEvents_log3() {
+    // check the behavior when TOF is zero or negative
+    el = EventList();
+    el += TofEvent(0, 0);
+    el += TofEvent(0.5, 0);
+    el += TofEvent(1, 0);
+
+    // Do compress events with log binning.
+    // Since there is a tof == 0 the first bin_end should be 1
+    EventList el_output;
+    TS_ASSERT_THROWS_NOTHING(el.compressEvents(-1, &el_output))
+
+    // now check individual events
+    TS_ASSERT_EQUALS(el_output.getNumberEvents(), 2)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(0).weight(), 2)
+    TS_ASSERT_EQUALS(el_output.getEvent(0).errorSquared(), 2)
+    TS_ASSERT_DELTA(el_output.getEvent(0).tof(), 0.25, 1e-5)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(1).weight(), 1)
+    TS_ASSERT_EQUALS(el_output.getEvent(1).errorSquared(), 1)
+    TS_ASSERT_DELTA(el_output.getEvent(1).tof(), 1, 1e-5)
+
+    // now add a negative TOF and it should throw
+    el += TofEvent(-1, 0);
+    TS_ASSERT_THROWS(el.compressEvents(-1, &el_output), const std::runtime_error &)
+  }
+
+  void test_compressFatEvents_log() {
+    el = EventList();
+    for (int pulseTime = 0; pulseTime < 5; pulseTime++)
+      for (double tof = 100; tof < 51200; tof++)
+        el += TofEvent(tof, DateAndTime{pulseTime, 0});
+
+    // First let's compare histogramming with compression versus without.
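+    // (generateHistogram ignores pulse times, so this comparison only checks the TOF grouping)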
+    // This may only work exactly when the smallest tof is equal to the minimum rebin parameter,
+    // in this case 100
+    MantidVec X, expected_Y, expected_E, Y, E;
+    VectorHelper::createAxisFromRebinParams({100., -1., 51200}, X, true);
+
+    // get expected results
+    el.generateHistogram(X, expected_Y, expected_E);
+
+    // do compress events with log binning
+    EventList el_output;
+    TS_ASSERT_THROWS_NOTHING(el.compressFatEvents(-1, DateAndTime{0}, 2., &el_output));
+
+    // check individual events
+    // with a pulsetime delta of 2 seconds we should end up with 3 pulsetime groups with 9 events in each group,
+    // so 27 events total
+    TS_ASSERT_EQUALS(el.getNumberEvents(), 255500);
+    TS_ASSERT_EQUALS(el_output.getNumberEvents(), 27);
+
+    // First pulse group with pulsetime = 500ms
+    // Event weights should double for each one, 200, 400, 800, 1600...
+    // Event tofs should double, it will be roughly 150, 300, 600, 1200...
+    for (int i = 0; i < 9; i++) {
+      TS_ASSERT_EQUALS(el_output.getEvent(i).pulseTime().totalNanoseconds(), 500000000)
+      TS_ASSERT_EQUALS(el_output.getEvent(i).weight(), 200 * pow(2, i))
+      TS_ASSERT_EQUALS(el_output.getEvent(i).errorSquared(), 200 * pow(2, i))
+      TS_ASSERT_DELTA(el_output.getEvent(i).tof(), 150 * pow(2, i), 0.5)
+    }
+
+    // Second pulse group with pulsetime = 2500ms
+    // Event weights should double for each one, 200, 400, 800, 1600...
+    // Event tofs should double, it will be roughly 150, 300, 600, 1200...
+    for (int i = 9; i < 18; i++) {
+      TS_ASSERT_EQUALS(el_output.getEvent(i).pulseTime().totalNanoseconds(), 2500000000)
+      TS_ASSERT_EQUALS(el_output.getEvent(i).weight(), 200 * pow(2, i - 9))
+      TS_ASSERT_EQUALS(el_output.getEvent(i).errorSquared(), 200 * pow(2, i - 9))
+      TS_ASSERT_DELTA(el_output.getEvent(i).tof(), 150 * pow(2, i - 9), 0.5)
+    }
+
+    // Third pulse group with pulsetime = 4000ms
+    // Event weights should double for each one, 100, 200, 400, 800...
+    // Event tofs should double, it will be roughly 150, 300, 600, 1200...
+    for (int i = 18; i < 27; i++) {
+      TS_ASSERT_EQUALS(el_output.getEvent(i).pulseTime().totalNanoseconds(), 4000000000)
+      TS_ASSERT_EQUALS(el_output.getEvent(i).weight(), 100 * pow(2, i - 18))
+      TS_ASSERT_EQUALS(el_output.getEvent(i).errorSquared(), 100 * pow(2, i - 18))
+      TS_ASSERT_DELTA(el_output.getEvent(i).tof(), 150 * pow(2, i - 18), 0.5)
+    }
+
+    // histogram to compare to un-compressed
+    el_output.generateHistogram(X, Y, E);
+
+    for (size_t i = 0; i < Y.size(); i++) {
+      TS_ASSERT_EQUALS(expected_Y[i], Y[i]);
+      TS_ASSERT_EQUALS(expected_E[i], E[i]);
+    }
+  }
+
+  void test_compressFatEvents_log2() {
+    el = EventList();
+    el += TofEvent(100, DateAndTime{0, 0});
+    el += TofEvent(1000, DateAndTime{0, 0});
+    el += TofEvent(1001, DateAndTime{0, 0});
+    el += TofEvent(100, DateAndTime{3, 0});
+    el += TofEvent(1000, DateAndTime{3, 0});
+    el += TofEvent(1001, DateAndTime{3, 0});
+    el += TofEvent(100, DateAndTime{6, 0});
+    el += TofEvent(1000, DateAndTime{6, 0});
+    el += TofEvent(1001, DateAndTime{6, 0});
+
+    // Do compress events with log binning.
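+    // (a tolerance of -1 doubles the bin width at each step, so TOFs 1000 and 1001 always share a bin)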
+    // With a large pulseTime delta of 10 seconds we should end up with 2 events with the same pulsetime but
+    // different TOFs
+    EventList el_output;
+    TS_ASSERT_THROWS_NOTHING(el.compressFatEvents(-1, DateAndTime{0}, 10, &el_output));
+
+    TS_ASSERT_EQUALS(el.getNumberEvents(), 9)
+    TS_ASSERT_EQUALS(el_output.getNumberEvents(), 2)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(0).weight(), 3)
+    TS_ASSERT_EQUALS(el_output.getEvent(0).errorSquared(), 3)
+    TS_ASSERT_DELTA(el_output.getEvent(0).tof(), 100, 1e-5)
+    TS_ASSERT_DELTA(el_output.getEvent(0).pulseTime().totalNanoseconds(), 3000000000, 1e-5)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(1).weight(), 6)
+    TS_ASSERT_EQUALS(el_output.getEvent(1).errorSquared(), 6)
+    TS_ASSERT_DELTA(el_output.getEvent(1).tof(), 1000.5, 1e-5)
+    TS_ASSERT_DELTA(el_output.getEvent(1).pulseTime().totalNanoseconds(), 3000000000, 1e-5)
+
+    // Do compress events with log binning.
+    // With a pulseTime delta of 5 seconds we should end up with 4 events
+    el_output = EventList();
+    TS_ASSERT_THROWS_NOTHING(el.compressFatEvents(-1, DateAndTime{0}, 5, &el_output));
+
+    TS_ASSERT_EQUALS(el.getNumberEvents(), 9)
+    TS_ASSERT_EQUALS(el_output.getNumberEvents(), 4)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(0).weight(), 2)
+    TS_ASSERT_EQUALS(el_output.getEvent(0).errorSquared(), 2)
+    TS_ASSERT_DELTA(el_output.getEvent(0).tof(), 100, 1e-5)
+    TS_ASSERT_DELTA(el_output.getEvent(0).pulseTime().totalNanoseconds(), 1500000000, 1e-5)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(1).weight(), 4)
+    TS_ASSERT_EQUALS(el_output.getEvent(1).errorSquared(), 4)
+    TS_ASSERT_DELTA(el_output.getEvent(1).tof(), 1000.5, 1e-5)
+    TS_ASSERT_DELTA(el_output.getEvent(1).pulseTime().totalNanoseconds(), 1500000000, 1e-5)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(2).weight(), 1)
+    TS_ASSERT_EQUALS(el_output.getEvent(2).errorSquared(), 1)
+    TS_ASSERT_DELTA(el_output.getEvent(2).tof(), 100, 1e-5)
+    TS_ASSERT_DELTA(el_output.getEvent(2).pulseTime().totalNanoseconds(), 6000000000, 1e-5)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(3).weight(), 2)
+    TS_ASSERT_EQUALS(el_output.getEvent(3).errorSquared(), 2)
+    TS_ASSERT_DELTA(el_output.getEvent(3).tof(), 1000.5, 1e-5)
+    TS_ASSERT_DELTA(el_output.getEvent(3).pulseTime().totalNanoseconds(), 6000000000, 1e-5)
+  }
+
+  void test_compressFatEvents_log3() {
+    // check the behavior when TOF is zero or negative
+    el = EventList();
+    el += TofEvent(0.5, 1);
+    el += TofEvent(1, 2);
+    el += TofEvent(0, 3);
+
+    // Do compress events with log binning.
+    // Since there is a tof == 0 the first bin_end should be 1
+    EventList el_output;
+    TS_ASSERT_THROWS_NOTHING(el.compressFatEvents(-1, DateAndTime{0}, 10, &el_output))
+
+    // now check individual events
+    TS_ASSERT_EQUALS(el_output.getNumberEvents(), 2)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(0).weight(), 2)
+    TS_ASSERT_EQUALS(el_output.getEvent(0).errorSquared(), 2)
+    TS_ASSERT_DELTA(el_output.getEvent(0).tof(), 0.25, 1e-5)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(1).weight(), 1)
+    TS_ASSERT_EQUALS(el_output.getEvent(1).errorSquared(), 1)
+    TS_ASSERT_DELTA(el_output.getEvent(1).tof(), 1, 1e-5)
+
+    // now add a negative TOF and it should throw
+    el += TofEvent(-1, 0);
+    TS_ASSERT_THROWS(el.compressFatEvents(-1, DateAndTime{0}, 10, &el_output), const std::runtime_error &)
+  }
+
   //==================================================================================
   // Mocking functions
   //==================================================================================
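At the algorithm level, the new mode is driven the same way as in the tests above; a minimal hedged sketch (workspace names are illustrative):

    // Request logarithmic compression explicitly. BinningMode="Logarithmic" overrides
    // the sign of Tolerance, while "Default" keeps the sign convention (negative = log).
    CompressEvents alg;
    alg.initialize();
    alg.setPropertyValue("InputWorkspace", "events_in");   // illustrative name
    alg.setPropertyValue("OutputWorkspace", "events_out"); // illustrative name
    alg.setProperty("Tolerance", 0.01); // treated as -0.01, i.e. 1% logarithmic bins
    alg.setPropertyValue("BinningMode", "Logarithmic");
    alg.execute();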
diff --git a/buildconfig/CMake/CppCheck_Suppressions.txt.in b/buildconfig/CMake/CppCheck_Suppressions.txt.in
index c7d56e764c05..7bcece0e65b0 100644
--- a/buildconfig/CMake/CppCheck_Suppressions.txt.in
+++ b/buildconfig/CMake/CppCheck_Suppressions.txt.in
@@ -814,11 +814,11 @@ virtualCallInConstructor:${CMAKE_SOURCE_DIR}/Framework/DataObjects/inc/MantidDat
 identicalConditionAfterEarlyExit:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:999
 identicalConditionAfterEarlyExit:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1095
 virtualCallInConstructor:${CMAKE_SOURCE_DIR}/Framework/DataObjects/inc/MantidDataObjects/TableWorkspace.h:199
-constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:3890
-constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4037
-constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4164
-constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4165
-derefInvalidIterator:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1560
+constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:3962
+constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4109
+constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4236
+constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4237
+derefInvalidIterator:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1590
 constVariableReference:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/TableWorkspace.cpp:253
 constVariablePointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/TableWorkspace.cpp:318
 cstyleCast:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/LeanElasticPeaksWorkspace.cpp:620
diff --git a/docs/source/algorithms/CompressEvents-v1.rst b/docs/source/algorithms/CompressEvents-v1.rst
index 31d04b4c265e..af903ff1b8f0 100644
--- a/docs/source/algorithms/CompressEvents-v1.rst
+++ b/docs/source/algorithms/CompressEvents-v1.rst
@@ -68,6 +68,11 @@ appear in the ``OutputWorkspace``. If it is not specified, then the
 format for the ``StartTime`` is ``2010-09-14T04:20:12``. Normally
 this parameter can be left unset.

+Logarithmic binning
+###################
+
+If you provide a negative tolerance or select ``Logarithmic`` as the ``BinningMode`` then the events will be combined together with increasingly large tolerances, starting from the smallest TOF value. This follows the same method as the logarithmic binning of :ref:`algm-Rebin`. This mode will fail if any of the TOF values are negative.
+
 Usage
 -----

diff --git a/docs/source/release/v6.10.0/Framework/Algorithms/New_features/37203.rst b/docs/source/release/v6.10.0/Framework/Algorithms/New_features/37203.rst
new file mode 100644
index 000000000000..4eb0544f031a
--- /dev/null
+++ b/docs/source/release/v6.10.0/Framework/Algorithms/New_features/37203.rst
@@ -0,0 +1 @@
+- Add the ability for :ref:`algm-CompressEvents` to combine events together in groups of logarithmically increasing size.