From 1fc49d7c0bc61350e34b42d3f6d18319b5c33cd8 Mon Sep 17 00:00:00 2001 From: Ross Whitfield Date: Fri, 19 Apr 2024 12:59:28 +1000 Subject: [PATCH 01/11] Add log compression to EventList::compressEventsHelper --- Framework/DataObjects/src/EventList.cpp | 96 +++++++++++++++------- Framework/DataObjects/test/EventListTest.h | 67 +++++++++++++++ 2 files changed, 135 insertions(+), 28 deletions(-) diff --git a/Framework/DataObjects/src/EventList.cpp b/Framework/DataObjects/src/EventList.cpp index c9b7f30e1d9d..9aae43a46b66 100644 --- a/Framework/DataObjects/src/EventList.cpp +++ b/Framework/DataObjects/src/EventList.cpp @@ -1450,7 +1450,7 @@ inline double calcNorm(const double errorSquared) { * @param events :: input event list. * @param out :: output WeightedEventNoTime vector. * @param tolerance :: how close do two event's TOF have to be to be considered - *the same. + *the same. Negative implies log grouping. */ template @@ -1471,34 +1471,74 @@ inline void EventList::compressEventsHelper(const std::vector &events, std::v double errorSquared = 0; double normalization = 0.; - for (auto it = events.cbegin(); it != events.cend(); it++) { - if ((it->m_tof - lastTof) <= tolerance) { - // Carry the error and weight - weight += it->weight(); - errorSquared += it->errorSquared(); - // Track the average tof - num++; - const double norm = calcNorm(it->errorSquared()); - normalization += norm; - totalTof += it->m_tof * norm; - } else { - // We exceeded the tolerance - // Create a new event with the average TOF and summed weights and - // squared errors. - if (num == 1) { - // last time-of-flight is the only one contributing - out.emplace_back(lastTof, weight, errorSquared); - } else if (num > 1) { - out.emplace_back(totalTof / normalization, weight, errorSquared); + if (tolerance < 0) { + lastTof = events.front().m_tof; + double bin_end = lastTof * (1 - tolerance); + + for (auto it = events.cbegin(); it != events.cend(); it++) { + if (it->m_tof < bin_end) { + // Carry the error and weight + weight += it->weight(); + errorSquared += it->errorSquared(); + // Track the average tof + num++; + const double norm = calcNorm(it->errorSquared()); + normalization += norm; + totalTof += it->m_tof * norm; + } else { + // We exceeded the tolerance + // Create a new event with the average TOF and summed weights and + // squared errors. + if (num == 1) { + // last time-of-flight is the only one contributing + out.emplace_back(lastTof, weight, errorSquared); + } else if (num > 1) { + out.emplace_back(totalTof / normalization, weight, errorSquared); + } + // Start a new combined object + num = 1; + const double norm = calcNorm(it->errorSquared()); + normalization = norm; + totalTof = it->m_tof * norm; + weight = it->weight(); + errorSquared = it->errorSquared(); + lastTof = it->m_tof; + + // advance the bin_end until we find the one that this next event falls into + while (lastTof >= bin_end) + bin_end = bin_end * (1 - tolerance); + } + } + } else { + for (auto it = events.cbegin(); it != events.cend(); it++) { + if ((it->m_tof - lastTof) <= tolerance) { + // Carry the error and weight + weight += it->weight(); + errorSquared += it->errorSquared(); + // Track the average tof + num++; + const double norm = calcNorm(it->errorSquared()); + normalization += norm; + totalTof += it->m_tof * norm; + } else { + // We exceeded the tolerance + // Create a new event with the average TOF and summed weights and + // squared errors. 
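+        // (the averaged TOF written below is totalTof / normalization, a mean over the group
+        //  weighted by calcNorm of each event's squared error)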
+ if (num == 1) { + // last time-of-flight is the only one contributing + out.emplace_back(lastTof, weight, errorSquared); + } else if (num > 1) { + out.emplace_back(totalTof / normalization, weight, errorSquared); + } + // Start a new combined object + num = 1; + const double norm = calcNorm(it->errorSquared()); + normalization = norm; + totalTof = it->m_tof * norm; + weight = it->weight(); + errorSquared = it->errorSquared(); + lastTof = it->m_tof; } - // Start a new combined object - num = 1; - const double norm = calcNorm(it->errorSquared()); - normalization = norm; - totalTof = it->m_tof * norm; - weight = it->weight(); - errorSquared = it->errorSquared(); - lastTof = it->m_tof; } } diff --git a/Framework/DataObjects/test/EventListTest.h b/Framework/DataObjects/test/EventListTest.h index bc20e59bd6fd..7ea450d3bc63 100644 --- a/Framework/DataObjects/test/EventListTest.h +++ b/Framework/DataObjects/test/EventListTest.h @@ -2159,6 +2159,73 @@ class EventListTest : public CxxTest::TestSuite { // last value } + void test_compressEvents_log() { + this->fake_uniform_data(10000.); + + // First lets compare histogramming with compression versus without. + // This may only work exactly when the smallest tof is equal to the minimum rebin parameter, + // in this case 100 + MantidVec X, expected_Y, expected_E, Y, E; + VectorHelper::createAxisFromRebinParams({100., -1., 819200}, X, true); + + // get expected results + el.generateHistogram(X, expected_Y, expected_E); + + // do compress events with log binning then histogram to compare to un-compressed + EventList el_output; + TS_ASSERT_THROWS_NOTHING(el.compressEvents(-1, &el_output)); + el_output.generateHistogram(X, Y, E); + + for (size_t i = 0; i < Y.size(); i++) { + TS_ASSERT_EQUALS(expected_Y[i], Y[i]); + TS_ASSERT_EQUALS(expected_E[i], E[i]); + } + + // now check individual events + TS_ASSERT_EQUALS(el.getNumberEvents(), 9999900); + TS_ASSERT_EQUALS(el_output.getNumberEvents(), 17); + + // event weights should double for each one, 100, 200, 400, 800... + // event tofs should double, it will be roughly 150, 300, 600, 1200... 
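+    // (each log group [100*2^i, 100*2^(i+1)) is twice as wide as the previous one, so the
+    //  roughly uniform fake data of ~1 event per microsecond doubles the summed weight while
+    //  the weighted centre lands near the middle of the group, 150 * 2^i)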
+ + // don't check last event as bin will be partially filled + for (int i = 0; i < 16; i++) { + TS_ASSERT_EQUALS(el_output.getEvent(i).weight(), 100 * pow(2, i)) + TS_ASSERT_EQUALS(el_output.getEvent(i).errorSquared(), 100 * pow(2, i)) + TS_ASSERT_DELTA(el_output.getEvent(i).tof(), 150 * pow(2, i), 0.5) + } + } + + void test_compressEvents_log2() { + // Check that for very sparse events + // Add four events, one that is repeated + // Should result in 3 events, with the second of weight 2 + el = EventList(); + el += TofEvent(100, 0); + el += TofEvent(1000, 0); + el += TofEvent(1000, 0); + el += TofEvent(100000, 0); + + // do compress events with log binning + EventList el_output; + TS_ASSERT_THROWS_NOTHING(el.compressEvents(-0.01, &el_output)) + + // now check individual events + TS_ASSERT_EQUALS(el_output.getNumberEvents(), 3) + + TS_ASSERT_EQUALS(el_output.getEvent(0).weight(), 1) + TS_ASSERT_EQUALS(el_output.getEvent(0).errorSquared(), 1) + TS_ASSERT_DELTA(el_output.getEvent(0).tof(), 100, 1e-5) + + TS_ASSERT_EQUALS(el_output.getEvent(1).weight(), 2) + TS_ASSERT_EQUALS(el_output.getEvent(1).errorSquared(), 2) + TS_ASSERT_DELTA(el_output.getEvent(1).tof(), 1000, 1e-5) + + TS_ASSERT_EQUALS(el_output.getEvent(2).weight(), 1) + TS_ASSERT_EQUALS(el_output.getEvent(2).errorSquared(), 1) + TS_ASSERT_DELTA(el_output.getEvent(2).tof(), 100000, 1e-5) + } + //================================================================================== // Mocking functions //================================================================================== From 6bb2f2aa3ad3cf6213044ca8416b19bac6688c32 Mon Sep 17 00:00:00 2001 From: Ross Whitfield Date: Tue, 23 Apr 2024 12:44:50 +1000 Subject: [PATCH 02/11] Add log compression to EventList::compressFatEventsHelper --- Framework/DataObjects/src/EventList.cpp | 133 +++++++++++++++------ Framework/DataObjects/test/EventListTest.h | 124 +++++++++++++++++++ 2 files changed, 220 insertions(+), 37 deletions(-) diff --git a/Framework/DataObjects/src/EventList.cpp b/Framework/DataObjects/src/EventList.cpp index 9aae43a46b66..c3deccce517d 100644 --- a/Framework/DataObjects/src/EventList.cpp +++ b/Framework/DataObjects/src/EventList.cpp @@ -1598,45 +1598,104 @@ inline void EventList::compressFatEventsHelper(const std::vector &events, std // bin if the pulses are histogrammed int64_t lastPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta; - // loop through events and accumulate weight - for (; it != events.cend(); ++it) { - const int64_t eventPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta; - if ((eventPulseBin <= lastPulseBin) && (std::fabs(it->m_tof - lastTof) <= tolerance)) { - // Carry the error and weight - weight += it->weight(); - errorSquared += it->errorSquared(); - double norm = calcNorm(it->errorSquared()); - tofNormalization += norm; - // Track the average tof - totalTof += it->m_tof * norm; - // Accumulate the pulse times - pulsetimes.emplace_back(it->m_pulsetime); - pulsetimeWeights.emplace_back(norm); - } else { - // We exceeded the tolerance - if (!pulsetimes.empty()) { - // Create a new event with the average TOF and summed weights and - // squared errors. 
1 event used doesn't need to average - if (pulsetimes.size() == 1) { - out.emplace_back(lastTof, pulsetimes.front(), weight, errorSquared); - } else { - out.emplace_back(totalTof / tofNormalization, - Kernel::DateAndTimeHelpers::averageSorted(pulsetimes, pulsetimeWeights), weight, - errorSquared); + + if (tolerance < 0) { + // get minimum Tof so that binning is consistent across all pulses + const auto event_min = std::min_element( + events.cbegin(), events.cend(), [](const auto &left, const auto &right) { return left.tof() < right.tof(); }); + const auto tof_min = event_min->tof(); + + lastTof = events.front().m_tof; + double bin_end = tof_min * (1 - tolerance); + + // loop through events and accumulate weight + for (; it != events.cend(); ++it) { + const int64_t eventPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta; + if ((eventPulseBin <= lastPulseBin) && (it->m_tof < bin_end)) { + // Carry the error and weight + weight += it->weight(); + errorSquared += it->errorSquared(); + double norm = calcNorm(it->errorSquared()); + tofNormalization += norm; + // Track the average tof + totalTof += it->m_tof * norm; + // Accumulate the pulse times + pulsetimes.emplace_back(it->m_pulsetime); + pulsetimeWeights.emplace_back(norm); + } else { + // We exceeded the tolerance + if (!pulsetimes.empty()) { + // Create a new event with the average TOF and summed weights and + // squared errors. 1 event used doesn't need to average + if (pulsetimes.size() == 1) { + out.emplace_back(lastTof, pulsetimes.front(), weight, errorSquared); + } else { + out.emplace_back(totalTof / tofNormalization, + Kernel::DateAndTimeHelpers::averageSorted(pulsetimes, pulsetimeWeights), weight, + errorSquared); + } } + // Start a new combined object + double norm = calcNorm(it->errorSquared()); + totalTof = it->m_tof * norm; + weight = it->weight(); + errorSquared = it->errorSquared(); + tofNormalization = norm; + lastTof = it->m_tof; + if (eventPulseBin != lastPulseBin) + bin_end = tof_min * (1 - tolerance); + lastPulseBin = eventPulseBin; + pulsetimes.clear(); + pulsetimes.emplace_back(it->m_pulsetime); + pulsetimeWeights.clear(); + pulsetimeWeights.emplace_back(norm); + + // advance the bin_end until we find the one that this next event falls into + while (lastTof >= bin_end) + bin_end = bin_end * (1 - tolerance); + } + } + } else { + // loop through events and accumulate weight + for (; it != events.cend(); ++it) { + const int64_t eventPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta; + if ((eventPulseBin <= lastPulseBin) && (std::fabs(it->m_tof - lastTof) <= tolerance)) { + // Carry the error and weight + weight += it->weight(); + errorSquared += it->errorSquared(); + double norm = calcNorm(it->errorSquared()); + tofNormalization += norm; + // Track the average tof + totalTof += it->m_tof * norm; + // Accumulate the pulse times + pulsetimes.emplace_back(it->m_pulsetime); + pulsetimeWeights.emplace_back(norm); + } else { + // We exceeded the tolerance + if (!pulsetimes.empty()) { + // Create a new event with the average TOF and summed weights and + // squared errors. 
1 event used doesn't need to average + if (pulsetimes.size() == 1) { + out.emplace_back(lastTof, pulsetimes.front(), weight, errorSquared); + } else { + out.emplace_back(totalTof / tofNormalization, + Kernel::DateAndTimeHelpers::averageSorted(pulsetimes, pulsetimeWeights), weight, + errorSquared); + } + } + // Start a new combined object + double norm = calcNorm(it->errorSquared()); + totalTof = it->m_tof * norm; + weight = it->weight(); + errorSquared = it->errorSquared(); + tofNormalization = norm; + lastTof = it->m_tof; + lastPulseBin = eventPulseBin; + pulsetimes.clear(); + pulsetimes.emplace_back(it->m_pulsetime); + pulsetimeWeights.clear(); + pulsetimeWeights.emplace_back(norm); } - // Start a new combined object - double norm = calcNorm(it->errorSquared()); - totalTof = it->m_tof * norm; - weight = it->weight(); - errorSquared = it->errorSquared(); - tofNormalization = norm; - lastTof = it->m_tof; - lastPulseBin = eventPulseBin; - pulsetimes.clear(); - pulsetimes.emplace_back(it->m_pulsetime); - pulsetimeWeights.clear(); - pulsetimeWeights.emplace_back(norm); } } diff --git a/Framework/DataObjects/test/EventListTest.h b/Framework/DataObjects/test/EventListTest.h index 7ea450d3bc63..fd80bff12673 100644 --- a/Framework/DataObjects/test/EventListTest.h +++ b/Framework/DataObjects/test/EventListTest.h @@ -2226,6 +2226,130 @@ class EventListTest : public CxxTest::TestSuite { TS_ASSERT_DELTA(el_output.getEvent(2).tof(), 100000, 1e-5) } + void test_compressFatEvents_log() { + el = EventList(); + for (int pulseTime = 0; pulseTime < 5; pulseTime++) + for (double tof = 100; tof < 51200; tof++) + el += TofEvent(tof, DateAndTime{pulseTime, 0}); + + // First lets compare histogramming with compression versus without. + // This may only work exactly when the smallest tof is equal to the minimum rebin parameter, + // in this case 100 + MantidVec X, expected_Y, expected_E, Y, E; + VectorHelper::createAxisFromRebinParams({100., -1., 51200}, X, true); + + // get expected results + el.generateHistogram(X, expected_Y, expected_E); + + /// do compress events with log binning + EventList el_output; + TS_ASSERT_THROWS_NOTHING(el.compressFatEvents(-1, DateAndTime{0}, 2., &el_output)); + + // check individual events + // with a pulsetime delta of 2 seconds we should end up with 3 pulsetime groups with 9 events in each group, so 27 + // events total + + TS_ASSERT_EQUALS(el.getNumberEvents(), 255500); + TS_ASSERT_EQUALS(el_output.getNumberEvents(), 27); + + // First pulse group with pulsetime = 500ms + // Event weights should double for each one, 200, 400, 800, 1600... + // Event tofs should double, it will be roughly 150, 300, 600, 1200... + for (int i = 0; i < 9; i++) { + TS_ASSERT_EQUALS(el_output.getEvent(i).pulseTime().totalNanoseconds(), 500000000) + TS_ASSERT_EQUALS(el_output.getEvent(i).weight(), 200 * pow(2, i)) + TS_ASSERT_EQUALS(el_output.getEvent(i).errorSquared(), 200 * pow(2, i)) + TS_ASSERT_DELTA(el_output.getEvent(i).tof(), 150 * pow(2, i), 0.5) + } + + // Second pulse group with pulsetime = 2500ms + // Event weights should double for each one, 200, 400, 800, 1600... + // Event tofs should double, it will be roughly 150, 300, 600, 1200... 
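+    // (the 2 second wall-clock bins pair up the pulses at 0 s + 1 s and at 2 s + 3 s, which is
+    //  why these two groups carry twice the weight of the lone 4 s pulse checked last)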
+ for (int i = 9; i < 18; i++) { + TS_ASSERT_EQUALS(el_output.getEvent(i).pulseTime().totalNanoseconds(), 2500000000) + TS_ASSERT_EQUALS(el_output.getEvent(i).weight(), 200 * pow(2, i - 9)) + TS_ASSERT_EQUALS(el_output.getEvent(i).errorSquared(), 200 * pow(2, i - 9)) + TS_ASSERT_DELTA(el_output.getEvent(i).tof(), 150 * pow(2, i - 9), 0.5) + } + + // Third pulse group with pulsetime = 4000ms + // Event weights should double for each one, 100, 200, 400, 800... + // Event tofs should double, it will be roughly 150, 300, 600, 1200... + for (int i = 18; i < 27; i++) { + TS_ASSERT_EQUALS(el_output.getEvent(i).pulseTime().totalNanoseconds(), 4000000000) + TS_ASSERT_EQUALS(el_output.getEvent(i).weight(), 100 * pow(2, i - 18)) + TS_ASSERT_EQUALS(el_output.getEvent(i).errorSquared(), 100 * pow(2, i - 18)) + TS_ASSERT_DELTA(el_output.getEvent(i).tof(), 150 * pow(2, i - 18), 0.5) + } + + // histogram to compare to un-compressed + el_output.generateHistogram(X, Y, E); + + for (size_t i = 0; i < Y.size(); i++) { + TS_ASSERT_EQUALS(expected_Y[i], Y[i]); + TS_ASSERT_EQUALS(expected_E[i], E[i]); + } + } + + void test_compressFatEvents_log2() { + el = EventList(); + el += TofEvent(100, DateAndTime{0, 0}); + el += TofEvent(1000, DateAndTime{0, 0}); + el += TofEvent(1001, DateAndTime{0, 0}); + el += TofEvent(100, DateAndTime{3, 0}); + el += TofEvent(1000, DateAndTime{3, 0}); + el += TofEvent(1001, DateAndTime{3, 0}); + el += TofEvent(100, DateAndTime{6, 0}); + el += TofEvent(1000, DateAndTime{6, 0}); + el += TofEvent(1001, DateAndTime{6, 0}); + + // Do compress events with log binning. + // With a large pulseTime delta of 10 seconds we should end up with 2 events with same pulsetime but different TOFs + EventList el_output; + TS_ASSERT_THROWS_NOTHING(el.compressFatEvents(-1, DateAndTime{0}, 10, &el_output)); + + TS_ASSERT_EQUALS(el.getNumberEvents(), 9) + TS_ASSERT_EQUALS(el_output.getNumberEvents(), 2) + + TS_ASSERT_EQUALS(el_output.getEvent(0).weight(), 3) + TS_ASSERT_EQUALS(el_output.getEvent(0).errorSquared(), 3) + TS_ASSERT_DELTA(el_output.getEvent(0).tof(), 100, 1e-5) + TS_ASSERT_DELTA(el_output.getEvent(0).pulseTime().totalNanoseconds(), 3000000000, 1e-5) + + TS_ASSERT_EQUALS(el_output.getEvent(1).weight(), 6) + TS_ASSERT_EQUALS(el_output.getEvent(1).errorSquared(), 6) + TS_ASSERT_DELTA(el_output.getEvent(1).tof(), 1000.5, 1e-5) + TS_ASSERT_DELTA(el_output.getEvent(1).pulseTime().totalNanoseconds(), 3000000000, 1e-5) + + // Do compress events with log binning. 
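+    // (pulses at 0 s and 3 s fall in wall-clock bin 0 and the 6 s pulse in bin 1, so each of the
+    //  two TOF groups is produced once per pulse group)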
+ // With a pulseTime delta of 5 seconds we should end up with 4 events + el_output = EventList(); + TS_ASSERT_THROWS_NOTHING(el.compressFatEvents(-1, DateAndTime{0}, 5, &el_output)); + + TS_ASSERT_EQUALS(el.getNumberEvents(), 9) + TS_ASSERT_EQUALS(el_output.getNumberEvents(), 4) + + TS_ASSERT_EQUALS(el_output.getEvent(0).weight(), 2) + TS_ASSERT_EQUALS(el_output.getEvent(0).errorSquared(), 2) + TS_ASSERT_DELTA(el_output.getEvent(0).tof(), 100, 1e-5) + TS_ASSERT_DELTA(el_output.getEvent(0).pulseTime().totalNanoseconds(), 1500000000, 1e-5) + + TS_ASSERT_EQUALS(el_output.getEvent(1).weight(), 4) + TS_ASSERT_EQUALS(el_output.getEvent(1).errorSquared(), 4) + TS_ASSERT_DELTA(el_output.getEvent(1).tof(), 1000.5, 1e-5) + TS_ASSERT_DELTA(el_output.getEvent(1).pulseTime().totalNanoseconds(), 1500000000, 1e-5) + + TS_ASSERT_EQUALS(el_output.getEvent(2).weight(), 1) + TS_ASSERT_EQUALS(el_output.getEvent(2).errorSquared(), 1) + TS_ASSERT_DELTA(el_output.getEvent(2).tof(), 100, 1e-5) + TS_ASSERT_DELTA(el_output.getEvent(2).pulseTime().totalNanoseconds(), 6000000000, 1e-5) + + TS_ASSERT_EQUALS(el_output.getEvent(3).weight(), 2) + TS_ASSERT_EQUALS(el_output.getEvent(3).errorSquared(), 2) + TS_ASSERT_DELTA(el_output.getEvent(3).tof(), 1000.5, 1e-5) + TS_ASSERT_DELTA(el_output.getEvent(3).pulseTime().totalNanoseconds(), 6000000000, 1e-5) + } + //================================================================================== // Mocking functions //================================================================================== From 55f60f7d06e57c878ba40d95cacb62e566c7090a Mon Sep 17 00:00:00 2001 From: Ross Whitfield Date: Wed, 24 Apr 2024 14:13:02 +1000 Subject: [PATCH 03/11] Add tests for Logarithmic binning in CompressEvents --- .../DataHandling/test/CompressEventsTest.h | 71 ++++++++++++++++++- 1 file changed, 70 insertions(+), 1 deletion(-) diff --git a/Framework/DataHandling/test/CompressEventsTest.h b/Framework/DataHandling/test/CompressEventsTest.h index 27e2cc2f4285..be10a5fdb657 100644 --- a/Framework/DataHandling/test/CompressEventsTest.h +++ b/Framework/DataHandling/test/CompressEventsTest.h @@ -32,7 +32,6 @@ class CompressEventsTest : public CxxTest::TestSuite { void test_InvalidInputs() { CompressEvents alg; TS_ASSERT_THROWS_NOTHING(alg.initialize()); - TS_ASSERT_THROWS(alg.setPropertyValue("Tolerance", "-1.0"), const std::invalid_argument &); TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("Tolerance", "0.0")); } @@ -128,4 +127,74 @@ class CompressEventsTest : public CxxTest::TestSuite { void test_InPlace_ZeroTolerance_WithPulseTime() { doTest("CompressEvents_input", "CompressEvents_input", 0.0, 50, .001); } + + void doLogarithmicTest(const std::string &binningMode, const double tolerance, double wallClockTolerance = 0.) { + EventWorkspace_sptr input, output; + EventType eventType = WEIGHTED_NOTIME; + if (wallClockTolerance > 0.) + eventType = WEIGHTED; + + /** Create event workspace with: + * 1 pixels (or another number) + * 64 histogrammed bins from 0.0 in steps of 1.0 + * 128 events; two in each bin, at time 1.0, 2.0, etc. + * PulseTime = 1 second, 2 seconds, etc. 
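+     * In logarithmic mode the group boundaries double from the smallest event time
+     * (0.5 here): 0.5, 1, 2, 4, ..., 64, which is why the asserts below expect
+     * 7 compressed events.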
+ */ + input = WorkspaceCreationHelper::createEventWorkspace(1, 64, 64, 0, 1, 2); + AnalysisDataService::Instance().addOrReplace("CompressEvents_input", input); + + TS_ASSERT_EQUALS(input->getNumberEvents(), 128); + const double inputIntegral = input->getSpectrum(0).integrate(0., 100., true); + + CompressEvents alg; + alg.initialize(); + alg.setPropertyValue("InputWorkspace", "CompressEvents_input"); + alg.setPropertyValue("OutputWorkspace", "CompressEvents_output"); + alg.setProperty("Tolerance", tolerance); + alg.setPropertyValue("BinningMode", binningMode); + if (wallClockTolerance > 0.) { + alg.setProperty("WallClockTolerance", wallClockTolerance); + alg.setProperty("StartTime", + "2010-01-01T00:00:00"); // copied from createEventWorkspace + } + + TS_ASSERT_THROWS_NOTHING(alg.execute()); + TS_ASSERT(alg.isExecuted()); + + output = AnalysisDataService::Instance().retrieveWS("CompressEvents_output"); + + TS_ASSERT_EQUALS(output->getNumberEvents(), 7); + TS_ASSERT_EQUALS(output->getEventType(), eventType); + TS_ASSERT_DELTA(output->getSpectrum(0).integrate(0., 100., true), inputIntegral, 1.e-6); + + EventList el = output->getSpectrum(0); + TS_ASSERT_DELTA(el.getEvent(0).weight(), 2.0, 1e-6); + TS_ASSERT_DELTA(el.getEvent(0).errorSquared(), 2.0, 1e-6); + TS_ASSERT_DELTA(el.getEvent(0).tof(), 0.5, 1e-6); + for (int i = 1; i < 7; i++) { + TS_ASSERT_DELTA(el.getEvent(i).weight(), 1.0 * pow(2, i), 1e-6); + TS_ASSERT_DELTA(el.getEvent(i).errorSquared(), 1.0 * pow(2, i), 1e-6); + TS_ASSERT_DELTA(el.getEvent(i).tof(), 0.75 * pow(2, i), 1e-6); + } + + if (wallClockTolerance > 0.) { + int64_t firstTime = 631152000000000000; + TS_ASSERT_EQUALS(el.getEvent(0).pulseTime().totalNanoseconds(), firstTime); + TS_ASSERT_EQUALS(el.getEvent(1).pulseTime().totalNanoseconds(), firstTime + 1000000000); + TS_ASSERT_EQUALS(el.getEvent(2).pulseTime().totalNanoseconds(), firstTime + 2500000000); + TS_ASSERT_EQUALS(el.getEvent(3).pulseTime().totalNanoseconds(), firstTime + 5500000000); + TS_ASSERT_EQUALS(el.getEvent(4).pulseTime().totalNanoseconds(), firstTime + 11500000000); + TS_ASSERT_EQUALS(el.getEvent(5).pulseTime().totalNanoseconds(), firstTime + 23500000000); + TS_ASSERT_EQUALS(el.getEvent(6).pulseTime().totalNanoseconds(), firstTime + 47500000000); + } else { + for (int i = 0; i < 7; i++) { + TS_ASSERT_EQUALS(el.getEvent(i).pulseTime().totalNanoseconds(), 0); + } + } + } + + void test_Logarithmic_binning() { doLogarithmicTest("Logarithmic", 1.); } + void test_Logarithmic_binning_default() { doLogarithmicTest("Default", -1.); } + void test_Logarithmic_binning_WithPulseTime() { doLogarithmicTest("Logarithmic", 1., 64); } + void test_Logarithmic_binning_default_WithPulseTime() { doLogarithmicTest("Default", -1., 64); } }; From bc8b600ae91e639787a2a93c386c01fbc999e8b1 Mon Sep 17 00:00:00 2001 From: Ross Whitfield Date: Wed, 24 Apr 2024 15:46:14 +1000 Subject: [PATCH 04/11] Add BinningMode parameter to CompressEvents --- Framework/DataHandling/src/CompressEvents.cpp | 29 +++++++++++++++++-- docs/source/algorithms/CompressEvents-v1.rst | 5 ++++ 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/Framework/DataHandling/src/CompressEvents.cpp b/Framework/DataHandling/src/CompressEvents.cpp index 777d58da6fd8..55b7257bddc4 100644 --- a/Framework/DataHandling/src/CompressEvents.cpp +++ b/Framework/DataHandling/src/CompressEvents.cpp @@ -12,6 +12,8 @@ #include "MantidKernel/BoundedValidator.h" #include "MantidKernel/DateAndTimeHelpers.h" #include "MantidKernel/DateTimeValidator.h" +#include 
"MantidKernel/EnumeratedString.h" +#include "MantidKernel/ListValidator.h" #include "tbb/parallel_for.h" @@ -26,6 +28,12 @@ using namespace Kernel; using namespace API; using namespace DataObjects; +namespace { +const std::vector binningModeNames{"Default", "Linear", "Logarithmic"}; +enum class BinningMode { DEFAULT, LINEAR, LOGARITHMIC, enum_count }; +typedef Mantid::Kernel::EnumeratedString BINMODE; +} // namespace + void CompressEvents::init() { declareProperty(std::make_unique>("InputWorkspace", "", Direction::Input), "The name of the EventWorkspace on which to perform the algorithm"); @@ -36,10 +44,12 @@ void CompressEvents::init() { // Tolerance must be >= 0.0 auto mustBePositive = std::make_shared>(); mustBePositive->setLower(0.0); - declareProperty(std::make_unique>("Tolerance", 1e-5, mustBePositive, Direction::Input), + declareProperty(std::make_unique>("Tolerance", 1e-5, Direction::Input), "The tolerance on each event's X value (normally TOF, but may be a " "different unit if you have used ConvertUnits).\n" - "Any events within Tolerance will be summed into a single event."); + "Any events within Tolerance will be summed into a single event. When compressing where positive is " + "linear tolerance, negative is logorithmic tolerance, and zero indicates that time-of-flight must be " + "identical to compress."); declareProperty( std::make_unique>("WallClockTolerance", EMPTY_DBL(), mustBePositive, Direction::Input), @@ -54,15 +64,28 @@ void CompressEvents::init() { "starting filtering. Ignored if WallClockTolerance is not specified. " "Default is start of run", Direction::Input); + + declareProperty("BinningMode", binningModeNames[size_t(BinningMode::DEFAULT)], + std::make_shared(binningModeNames), + "Binning behavior can be specified in the usual way through sign of tolerance and other properties " + "('Default'); or can be set to one of the allowed binning modes. This will override all other " + "specification or default behavior."); } void CompressEvents::exec() { // Get the input workspace EventWorkspace_sptr inputWS = getProperty("InputWorkspace"); EventWorkspace_sptr outputWS = getProperty("OutputWorkspace"); - const double toleranceTof = getProperty("Tolerance"); + double toleranceTof = getProperty("Tolerance"); const double toleranceWallClock = getProperty("WallClockTolerance"); const bool compressFat = !isEmpty(toleranceWallClock); + + BINMODE mode = getPropertyValue("BinningMode"); + if (mode == BinningMode::LINEAR) + toleranceTof = std::fabs(toleranceTof); + else if (mode == BinningMode::LOGARITHMIC) + toleranceTof = -1. * std::fabs(toleranceTof); + Types::Core::DateAndTime startTime; if (compressFat) { diff --git a/docs/source/algorithms/CompressEvents-v1.rst b/docs/source/algorithms/CompressEvents-v1.rst index 31d04b4c265e..0e323c4861a2 100644 --- a/docs/source/algorithms/CompressEvents-v1.rst +++ b/docs/source/algorithms/CompressEvents-v1.rst @@ -68,6 +68,11 @@ appear in the ``OutputWorkspace``. If it is not specified, then the format for the ``StartTime`` is ``2010-09-14T04:20:12``. Normally this parameter can be left unset. +Logarithmic binning +################### + +If you provide a negative tolerance or select ``Logarithmic`` as the ``BinningMode`` then the events will be combined together in increase large tolerances starting from the smallest TOF value. This follows the same method as the logarithmic binning of :ref:`algm-Rebin`. 
+
 Usage
 -----

From 0f20467e6dd0b46d693edf5c5e40e89770e2565e Mon Sep 17 00:00:00 2001
From: Ross Whitfield
Date: Wed, 24 Apr 2024 16:00:14 +1000
Subject: [PATCH 05/11] Add release notes

---
 .../release/v6.10.0/Framework/Algorithms/New_features/37203.rst | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 docs/source/release/v6.10.0/Framework/Algorithms/New_features/37203.rst

diff --git a/docs/source/release/v6.10.0/Framework/Algorithms/New_features/37203.rst b/docs/source/release/v6.10.0/Framework/Algorithms/New_features/37203.rst
new file mode 100644
index 000000000000..4eb0544f031a
--- /dev/null
+++ b/docs/source/release/v6.10.0/Framework/Algorithms/New_features/37203.rst
@@ -0,0 +1 @@
+- Add the ability for :ref:`algm-CompressEvents` to combine events together in groups of logarithmically increasing size.

From 29b179ec22c83e2561a824434cdcf885c045eea3 Mon Sep 17 00:00:00 2001
From: Ross Whitfield
Date: Wed, 24 Apr 2024 16:12:10 +1000
Subject: [PATCH 06/11] Update CppCheck_Suppressions

---
 buildconfig/CMake/CppCheck_Suppressions.txt.in | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/buildconfig/CMake/CppCheck_Suppressions.txt.in b/buildconfig/CMake/CppCheck_Suppressions.txt.in
index c7d56e764c05..6fdb7f6109f6 100644
--- a/buildconfig/CMake/CppCheck_Suppressions.txt.in
+++ b/buildconfig/CMake/CppCheck_Suppressions.txt.in
@@ -814,11 +814,11 @@ virtualCallInConstructor:${CMAKE_SOURCE_DIR}/Framework/DataObjects/inc/MantidDat
 identicalConditionAfterEarlyExit:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:999
 identicalConditionAfterEarlyExit:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1095
 virtualCallInConstructor:${CMAKE_SOURCE_DIR}/Framework/DataObjects/inc/MantidDataObjects/TableWorkspace.h:199
-constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:3890
-constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4037
-constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4164
-constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4165
-derefInvalidIterator:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1560
+constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:3989
+constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4136
+constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4263
+constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4264
+derefInvalidIterator:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1600
 constVariableReference:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/TableWorkspace.cpp:253
 constVariablePointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/TableWorkspace.cpp:318
 cstyleCast:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/LeanElasticPeaksWorkspace.cpp:620

From bcf53429da2df4ba089b206eb223259958852d15 Mon Sep 17 00:00:00 2001
From: Ross Whitfield
Date: Fri, 26 Apr 2024 13:19:43 +1000
Subject: [PATCH 07/11] Update EventList compress helpers to reduce code duplication

---
 Framework/DataObjects/src/EventList.cpp | 282 ++++++++++--------------
 1 file changed, 121 insertions(+), 161 deletions(-)

diff --git a/Framework/DataObjects/src/EventList.cpp b/Framework/DataObjects/src/EventList.cpp
index c3deccce517d..51098a9d9c74 100644
--- a/Framework/DataObjects/src/EventList.cpp
+++ b/Framework/DataObjects/src/EventList.cpp
@@ -1462,7 +1462,7 @@ inline void
EventList::compressEventsHelper(const std::vector &events, std::v out.reserve(events.size() / 20); // The last TOF to which we are comparing. - double lastTof = std::numeric_limits::lowest(); + double lastTof = events.front().m_tof; // For getting an accurate average TOF double totalTof = 0; int num = 0; @@ -1471,74 +1471,58 @@ inline void EventList::compressEventsHelper(const std::vector &events, std::v double errorSquared = 0; double normalization = 0.; - if (tolerance < 0) { - lastTof = events.front().m_tof; - double bin_end = lastTof * (1 - tolerance); - - for (auto it = events.cbegin(); it != events.cend(); it++) { - if (it->m_tof < bin_end) { - // Carry the error and weight - weight += it->weight(); - errorSquared += it->errorSquared(); - // Track the average tof - num++; - const double norm = calcNorm(it->errorSquared()); - normalization += norm; - totalTof += it->m_tof * norm; - } else { - // We exceeded the tolerance - // Create a new event with the average TOF and summed weights and - // squared errors. - if (num == 1) { - // last time-of-flight is the only one contributing - out.emplace_back(lastTof, weight, errorSquared); - } else if (num > 1) { - out.emplace_back(totalTof / normalization, weight, errorSquared); - } - // Start a new combined object - num = 1; - const double norm = calcNorm(it->errorSquared()); - normalization = norm; - totalTof = it->m_tof * norm; - weight = it->weight(); - errorSquared = it->errorSquared(); - lastTof = it->m_tof; - - // advance the bin_end until we find the one that this next event falls into - while (lastTof >= bin_end) - bin_end = bin_end * (1 - tolerance); - } - } - } else { - for (auto it = events.cbegin(); it != events.cend(); it++) { - if ((it->m_tof - lastTof) <= tolerance) { - // Carry the error and weight - weight += it->weight(); - errorSquared += it->errorSquared(); - // Track the average tof - num++; - const double norm = calcNorm(it->errorSquared()); - normalization += norm; - totalTof += it->m_tof * norm; - } else { - // We exceeded the tolerance - // Create a new event with the average TOF and summed weights and - // squared errors. 
- if (num == 1) { - // last time-of-flight is the only one contributing - out.emplace_back(lastTof, weight, errorSquared); - } else if (num > 1) { - out.emplace_back(totalTof / normalization, weight, errorSquared); - } - // Start a new combined object - num = 1; - const double norm = calcNorm(it->errorSquared()); - normalization = norm; - totalTof = it->m_tof * norm; - weight = it->weight(); - errorSquared = it->errorSquared(); - lastTof = it->m_tof; + double bin_end = lastTof; + std::function compareTof; + std::function next_bin; + + if (tolerance < 0) { // log + // for log we do "less than" so that is matches the log binning of the Rebin algorithm + compareTof = [](const double lhs, const double rhs) { return lhs < rhs; }; + next_bin = [tolerance](const double lastTof, double bin_end) { + // advance the bin_end until we find the one that this next event falls into + while (lastTof >= bin_end) + bin_end = bin_end * (1 - tolerance); + return bin_end; + }; + } else { // linear + // for linear we do "less than or equals" because that is how it was originally implemented + compareTof = [](const double lhs, const double rhs) { return lhs <= rhs; }; + next_bin = [tolerance](const double lastTof, double) { return lastTof + tolerance; }; + } + + // get first bin_end + bin_end = next_bin(lastTof, bin_end); + + for (auto it = events.cbegin(); it != events.cend(); it++) { + if (compareTof(it->m_tof, bin_end)) { + // Carry the error and weight + weight += it->weight(); + errorSquared += it->errorSquared(); + // Track the average tof + num++; + const double norm = calcNorm(it->errorSquared()); + normalization += norm; + totalTof += it->m_tof * norm; + } else { + // We exceeded the tolerance + // Create a new event with the average TOF and summed weights and + // squared errors. + if (num == 1) { + // last time-of-flight is the only one contributing + out.emplace_back(lastTof, weight, errorSquared); + } else if (num > 1) { + out.emplace_back(totalTof / normalization, weight, errorSquared); } + // Start a new combined object + num = 1; + const double norm = calcNorm(it->errorSquared()); + normalization = norm; + totalTof = it->m_tof * norm; + weight = it->weight(); + errorSquared = it->errorSquared(); + lastTof = it->m_tof; + + bin_end = next_bin(lastTof, bin_end); } } @@ -1568,7 +1552,7 @@ inline void EventList::compressFatEventsHelper(const std::vector &events, std out.reserve(events.size() / 20); // The last TOF to which we are comparing. 
- double lastTof = std::numeric_limits::lowest(); + double lastTof = events.front().m_tof; // For getting an accurate average TOF double totalTof = 0; @@ -1599,103 +1583,79 @@ inline void EventList::compressFatEventsHelper(const std::vector &events, std // bin if the pulses are histogrammed int64_t lastPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta; - if (tolerance < 0) { + double bin_end = lastTof; + double tof_min{0}; + std::function compareTof; + std::function next_bin; + + if (tolerance < 0) { // log + // for log we do "less than" so that is matches the log binning of the Rebin algorithm + compareTof = [](const double lhs, const double rhs) { return lhs < rhs; }; + next_bin = [tolerance](const double lastTof, double bin_end) { + // advance the bin_end until we find the one that this next event falls into + while (lastTof >= bin_end) + bin_end = bin_end * (1 - tolerance); + return bin_end; + }; + // get minimum Tof so that binning is consistent across all pulses const auto event_min = std::min_element( events.cbegin(), events.cend(), [](const auto &left, const auto &right) { return left.tof() < right.tof(); }); - const auto tof_min = event_min->tof(); - - lastTof = events.front().m_tof; - double bin_end = tof_min * (1 - tolerance); - - // loop through events and accumulate weight - for (; it != events.cend(); ++it) { - const int64_t eventPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta; - if ((eventPulseBin <= lastPulseBin) && (it->m_tof < bin_end)) { - // Carry the error and weight - weight += it->weight(); - errorSquared += it->errorSquared(); - double norm = calcNorm(it->errorSquared()); - tofNormalization += norm; - // Track the average tof - totalTof += it->m_tof * norm; - // Accumulate the pulse times - pulsetimes.emplace_back(it->m_pulsetime); - pulsetimeWeights.emplace_back(norm); - } else { - // We exceeded the tolerance - if (!pulsetimes.empty()) { - // Create a new event with the average TOF and summed weights and - // squared errors. 
1 event used doesn't need to average - if (pulsetimes.size() == 1) { - out.emplace_back(lastTof, pulsetimes.front(), weight, errorSquared); - } else { - out.emplace_back(totalTof / tofNormalization, - Kernel::DateAndTimeHelpers::averageSorted(pulsetimes, pulsetimeWeights), weight, - errorSquared); - } - } - // Start a new combined object - double norm = calcNorm(it->errorSquared()); - totalTof = it->m_tof * norm; - weight = it->weight(); - errorSquared = it->errorSquared(); - tofNormalization = norm; - lastTof = it->m_tof; - if (eventPulseBin != lastPulseBin) - bin_end = tof_min * (1 - tolerance); - lastPulseBin = eventPulseBin; - pulsetimes.clear(); - pulsetimes.emplace_back(it->m_pulsetime); - pulsetimeWeights.clear(); - pulsetimeWeights.emplace_back(norm); - - // advance the bin_end until we find the one that this next event falls into - while (lastTof >= bin_end) - bin_end = bin_end * (1 - tolerance); - } - } - } else { - // loop through events and accumulate weight - for (; it != events.cend(); ++it) { - const int64_t eventPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta; - if ((eventPulseBin <= lastPulseBin) && (std::fabs(it->m_tof - lastTof) <= tolerance)) { - // Carry the error and weight - weight += it->weight(); - errorSquared += it->errorSquared(); - double norm = calcNorm(it->errorSquared()); - tofNormalization += norm; - // Track the average tof - totalTof += it->m_tof * norm; - // Accumulate the pulse times - pulsetimes.emplace_back(it->m_pulsetime); - pulsetimeWeights.emplace_back(norm); - } else { - // We exceeded the tolerance - if (!pulsetimes.empty()) { - // Create a new event with the average TOF and summed weights and - // squared errors. 1 event used doesn't need to average - if (pulsetimes.size() == 1) { - out.emplace_back(lastTof, pulsetimes.front(), weight, errorSquared); - } else { - out.emplace_back(totalTof / tofNormalization, - Kernel::DateAndTimeHelpers::averageSorted(pulsetimes, pulsetimeWeights), weight, - errorSquared); - } + bin_end = tof_min = event_min->tof(); + } else { // linear + // for linear we do "less than or equals" because that is how it was originally implemented + compareTof = [](const double lhs, const double rhs) { return lhs <= rhs; }; + next_bin = [tolerance](const double lastTof, double) { return lastTof + tolerance; }; + } + + // get first bin_end + bin_end = next_bin(lastTof, bin_end); + + // loop through events and accumulate weight + for (; it != events.cend(); ++it) { + const int64_t eventPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta; + if ((eventPulseBin <= lastPulseBin) && compareTof(it->m_tof, bin_end)) { + // Carry the error and weight + weight += it->weight(); + errorSquared += it->errorSquared(); + double norm = calcNorm(it->errorSquared()); + tofNormalization += norm; + // Track the average tof + totalTof += it->m_tof * norm; + // Accumulate the pulse times + pulsetimes.emplace_back(it->m_pulsetime); + pulsetimeWeights.emplace_back(norm); + } else { + // We exceeded the tolerance + if (!pulsetimes.empty()) { + // Create a new event with the average TOF and summed weights and + // squared errors. 
1 event used doesn't need to average + if (pulsetimes.size() == 1) { + out.emplace_back(lastTof, pulsetimes.front(), weight, errorSquared); + } else { + out.emplace_back(totalTof / tofNormalization, + Kernel::DateAndTimeHelpers::averageSorted(pulsetimes, pulsetimeWeights), weight, + errorSquared); } - // Start a new combined object - double norm = calcNorm(it->errorSquared()); - totalTof = it->m_tof * norm; - weight = it->weight(); - errorSquared = it->errorSquared(); - tofNormalization = norm; - lastTof = it->m_tof; - lastPulseBin = eventPulseBin; - pulsetimes.clear(); - pulsetimes.emplace_back(it->m_pulsetime); - pulsetimeWeights.clear(); - pulsetimeWeights.emplace_back(norm); } + if (tolerance < 0 && eventPulseBin != lastPulseBin) + // reset the bin_end for the new pulse bin + bin_end = tof_min; + + // Start a new combined object + double norm = calcNorm(it->errorSquared()); + totalTof = it->m_tof * norm; + weight = it->weight(); + errorSquared = it->errorSquared(); + tofNormalization = norm; + lastTof = it->m_tof; + lastPulseBin = eventPulseBin; + pulsetimes.clear(); + pulsetimes.emplace_back(it->m_pulsetime); + pulsetimeWeights.clear(); + pulsetimeWeights.emplace_back(norm); + + bin_end = next_bin(lastTof, bin_end); } } From bd25ceeff9b3b7c35eae51de750d741391c2b9ab Mon Sep 17 00:00:00 2001 From: Ross Whitfield Date: Fri, 26 Apr 2024 14:56:28 +1000 Subject: [PATCH 08/11] Update CompressEventsTest to compare DateAndTime --- .../DataHandling/test/CompressEventsTest.h | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/Framework/DataHandling/test/CompressEventsTest.h b/Framework/DataHandling/test/CompressEventsTest.h index be10a5fdb657..3b64483b9858 100644 --- a/Framework/DataHandling/test/CompressEventsTest.h +++ b/Framework/DataHandling/test/CompressEventsTest.h @@ -20,6 +20,7 @@ using namespace Mantid::DataHandling; using namespace Mantid::API; using namespace Mantid::Geometry; using namespace Mantid::DataObjects; +using namespace Mantid::Types::Core; class CompressEventsTest : public CxxTest::TestSuite { public: @@ -178,17 +179,18 @@ class CompressEventsTest : public CxxTest::TestSuite { } if (wallClockTolerance > 0.) 
{ - int64_t firstTime = 631152000000000000; - TS_ASSERT_EQUALS(el.getEvent(0).pulseTime().totalNanoseconds(), firstTime); - TS_ASSERT_EQUALS(el.getEvent(1).pulseTime().totalNanoseconds(), firstTime + 1000000000); - TS_ASSERT_EQUALS(el.getEvent(2).pulseTime().totalNanoseconds(), firstTime + 2500000000); - TS_ASSERT_EQUALS(el.getEvent(3).pulseTime().totalNanoseconds(), firstTime + 5500000000); - TS_ASSERT_EQUALS(el.getEvent(4).pulseTime().totalNanoseconds(), firstTime + 11500000000); - TS_ASSERT_EQUALS(el.getEvent(5).pulseTime().totalNanoseconds(), firstTime + 23500000000); - TS_ASSERT_EQUALS(el.getEvent(6).pulseTime().totalNanoseconds(), firstTime + 47500000000); + const auto startTime = DateAndTime("2010-01-01T00:00:00"); + TS_ASSERT_EQUALS(el.getEvent(0).pulseTime(), startTime); + TS_ASSERT_EQUALS(el.getEvent(1).pulseTime(), startTime + 1.0); + TS_ASSERT_EQUALS(el.getEvent(2).pulseTime(), startTime + 2.5); + TS_ASSERT_EQUALS(el.getEvent(3).pulseTime(), startTime + 5.5); + TS_ASSERT_EQUALS(el.getEvent(4).pulseTime(), startTime + 11.5); + TS_ASSERT_EQUALS(el.getEvent(5).pulseTime(), startTime + 23.5); + TS_ASSERT_EQUALS(el.getEvent(6).pulseTime(), startTime + 47.5); } else { + const auto timeZero = DateAndTime{0}; for (int i = 0; i < 7; i++) { - TS_ASSERT_EQUALS(el.getEvent(i).pulseTime().totalNanoseconds(), 0); + TS_ASSERT_EQUALS(el.getEvent(i).pulseTime(), timeZero); } } } From 367599e0f6a5aa050996dd5ae499626117b3b2d6 Mon Sep 17 00:00:00 2001 From: Ross Whitfield Date: Fri, 26 Apr 2024 15:30:44 +1000 Subject: [PATCH 09/11] Update CppCheck_Suppressions --- buildconfig/CMake/CppCheck_Suppressions.txt.in | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/buildconfig/CMake/CppCheck_Suppressions.txt.in b/buildconfig/CMake/CppCheck_Suppressions.txt.in index 6fdb7f6109f6..6a369aa4659e 100644 --- a/buildconfig/CMake/CppCheck_Suppressions.txt.in +++ b/buildconfig/CMake/CppCheck_Suppressions.txt.in @@ -814,11 +814,11 @@ virtualCallInConstructor:${CMAKE_SOURCE_DIR}/Framework/DataObjects/inc/MantidDat identicalConditionAfterEarlyExit:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:999 identicalConditionAfterEarlyExit:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1095 virtualCallInConstructor:${CMAKE_SOURCE_DIR}/Framework/DataObjects/inc/MantidDataObjects/TableWorkspace.h:199 -constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:3989 -constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4136 -constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4263 -constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4264 -derefInvalidIterator:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1600 +constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:3949 +constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4096 +constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4223 +constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4224 +derefInvalidIterator:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1584 constVariableReference:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/TableWorkspace.cpp:253 constVariablePointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/TableWorkspace.cpp:318 cstyleCast:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/LeanElasticPeaksWorkspace.cpp:620 From 24615f8e045bf8016ccf16a39234d0bc56971b76 
Mon Sep 17 00:00:00 2001 From: Ross Whitfield Date: Tue, 30 Apr 2024 10:56:46 +1000 Subject: [PATCH 10/11] Correctly handle negative and zero TOF --- Framework/DataObjects/src/EventList.cpp | 13 +++++ Framework/DataObjects/test/EventListTest.h | 56 ++++++++++++++++++++ docs/source/algorithms/CompressEvents-v1.rst | 2 +- 3 files changed, 70 insertions(+), 1 deletion(-) diff --git a/Framework/DataObjects/src/EventList.cpp b/Framework/DataObjects/src/EventList.cpp index 51098a9d9c74..b6dbefee7d16 100644 --- a/Framework/DataObjects/src/EventList.cpp +++ b/Framework/DataObjects/src/EventList.cpp @@ -1476,6 +1476,12 @@ inline void EventList::compressEventsHelper(const std::vector &events, std::v std::function next_bin; if (tolerance < 0) { // log + if (lastTof < 0) + throw std::runtime_error("compressEvents with log binning doesn't work with negative TOF"); + + if (lastTof == 0) + bin_end = fabs(tolerance); + // for log we do "less than" so that is matches the log binning of the Rebin algorithm compareTof = [](const double lhs, const double rhs) { return lhs < rhs; }; next_bin = [tolerance](const double lastTof, double bin_end) { @@ -1602,6 +1608,13 @@ inline void EventList::compressFatEventsHelper(const std::vector &events, std const auto event_min = std::min_element( events.cbegin(), events.cend(), [](const auto &left, const auto &right) { return left.tof() < right.tof(); }); bin_end = tof_min = event_min->tof(); + + if (tof_min < 0) + throw std::runtime_error("compressEvents with log binning doesn't work with negative TOF"); + + if (tof_min == 0) + bin_end = fabs(tolerance); + } else { // linear // for linear we do "less than or equals" because that is how it was originally implemented compareTof = [](const double lhs, const double rhs) { return lhs <= rhs; }; diff --git a/Framework/DataObjects/test/EventListTest.h b/Framework/DataObjects/test/EventListTest.h index fd80bff12673..427840ef8241 100644 --- a/Framework/DataObjects/test/EventListTest.h +++ b/Framework/DataObjects/test/EventListTest.h @@ -2226,6 +2226,34 @@ class EventListTest : public CxxTest::TestSuite { TS_ASSERT_DELTA(el_output.getEvent(2).tof(), 100000, 1e-5) } + void test_compressEvents_log3() { + // check the behavior when TOF is zero or negative + el = EventList(); + el += TofEvent(0, 0); + el += TofEvent(0.5, 0); + el += TofEvent(1, 0); + + // Do compress events with log binning + // Since there is a tof==0 then the first bin_end should be 1 + EventList el_output; + TS_ASSERT_THROWS_NOTHING(el.compressEvents(-1, &el_output)) + + // now check individual events + TS_ASSERT_EQUALS(el_output.getNumberEvents(), 2) + + TS_ASSERT_EQUALS(el_output.getEvent(0).weight(), 2) + TS_ASSERT_EQUALS(el_output.getEvent(0).errorSquared(), 2) + TS_ASSERT_DELTA(el_output.getEvent(0).tof(), 0.25, 1e-5) + + TS_ASSERT_EQUALS(el_output.getEvent(1).weight(), 1) + TS_ASSERT_EQUALS(el_output.getEvent(1).errorSquared(), 1) + TS_ASSERT_DELTA(el_output.getEvent(1).tof(), 1, 1e-5) + + // now add a negative TOF and it should throw + el += TofEvent(-1, 0); + TS_ASSERT_THROWS(el.compressEvents(-1, &el_output), const std::runtime_error &) + } + void test_compressFatEvents_log() { el = EventList(); for (int pulseTime = 0; pulseTime < 5; pulseTime++) @@ -2350,6 +2378,34 @@ class EventListTest : public CxxTest::TestSuite { TS_ASSERT_DELTA(el_output.getEvent(3).pulseTime().totalNanoseconds(), 6000000000, 1e-5) } + void test_compressFatEvents_log3() { + // check the behavior when TOF is zero or negative + el = EventList(); + el += TofEvent(0.5, 1); + el += 
TofEvent(1, 2);
+    el += TofEvent(0, 3);
+
+    // Do compress events with log binning.
+    // Since there is a tof==0, the first bin_end should be 1
+    EventList el_output;
+    TS_ASSERT_THROWS_NOTHING(el.compressFatEvents(-1, DateAndTime{0}, 10, &el_output))
+
+    // now check individual events
+    TS_ASSERT_EQUALS(el_output.getNumberEvents(), 2)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(0).weight(), 2)
+    TS_ASSERT_EQUALS(el_output.getEvent(0).errorSquared(), 2)
+    TS_ASSERT_DELTA(el_output.getEvent(0).tof(), 0.25, 1e-5)
+
+    TS_ASSERT_EQUALS(el_output.getEvent(1).weight(), 1)
+    TS_ASSERT_EQUALS(el_output.getEvent(1).errorSquared(), 1)
+    TS_ASSERT_DELTA(el_output.getEvent(1).tof(), 1, 1e-5)
+
+    // now add a negative TOF and it should throw
+    el += TofEvent(-1, 0);
+    TS_ASSERT_THROWS(el.compressFatEvents(-1, DateAndTime{0}, 10, &el_output), const std::runtime_error &)
+  }
+
   //==================================================================================
   // Mocking functions
   //==================================================================================
diff --git a/docs/source/algorithms/CompressEvents-v1.rst b/docs/source/algorithms/CompressEvents-v1.rst
index 0e323c4861a2..af903ff1b8f0 100644
--- a/docs/source/algorithms/CompressEvents-v1.rst
+++ b/docs/source/algorithms/CompressEvents-v1.rst
@@ -71,7 +71,7 @@ parameter can be left unset.
 Logarithmic binning
 ###################

-If you provide a negative tolerance or select ``Logarithmic`` as the ``BinningMode`` then the events will be combined together with increasingly large tolerances, starting from the smallest TOF value. This follows the same method as the logarithmic binning of :ref:`algm-Rebin`.
+If you provide a negative tolerance or select ``Logarithmic`` as the ``BinningMode`` then the events will be combined together with increasingly large tolerances, starting from the smallest TOF value. This follows the same method as the logarithmic binning of :ref:`algm-Rebin`. This mode will fail if any of the TOF values are negative.
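+If the smallest time-of-flight in the list is exactly zero, the first grouping boundary is instead placed at the absolute value of the tolerance, after which the boundaries grow geometrically as usual.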
Usage ----- From 2af185d473203c3ec16598b19c2acd22b49bb8bb Mon Sep 17 00:00:00 2001 From: Ross Whitfield Date: Tue, 30 Apr 2024 11:07:55 +1000 Subject: [PATCH 11/11] Update CppCheck_Suppressions --- buildconfig/CMake/CppCheck_Suppressions.txt.in | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/buildconfig/CMake/CppCheck_Suppressions.txt.in b/buildconfig/CMake/CppCheck_Suppressions.txt.in index 6a369aa4659e..7bcece0e65b0 100644 --- a/buildconfig/CMake/CppCheck_Suppressions.txt.in +++ b/buildconfig/CMake/CppCheck_Suppressions.txt.in @@ -814,11 +814,11 @@ virtualCallInConstructor:${CMAKE_SOURCE_DIR}/Framework/DataObjects/inc/MantidDat identicalConditionAfterEarlyExit:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:999 identicalConditionAfterEarlyExit:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1095 virtualCallInConstructor:${CMAKE_SOURCE_DIR}/Framework/DataObjects/inc/MantidDataObjects/TableWorkspace.h:199 -constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:3949 -constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4096 -constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4223 -constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4224 -derefInvalidIterator:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1584 +constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:3962 +constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4109 +constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4236 +constParameterPointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:4237 +derefInvalidIterator:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/EventList.cpp:1590 constVariableReference:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/TableWorkspace.cpp:253 constVariablePointer:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/TableWorkspace.cpp:318 cstyleCast:${CMAKE_SOURCE_DIR}/Framework/DataObjects/src/LeanElasticPeaksWorkspace.cpp:620
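
A minimal standalone sketch of the logarithmic grouping loop in compressEventsHelper, for experimenting outside Mantid, assuming events sorted by TOF with strictly positive times. The Event struct, the compressLog name, and the plain weighted mean are illustrative stand-ins rather than the Mantid classes (which use WeightedEventNoTime and the calcNorm-weighted average); only the geometric bin-edge bookkeeping mirrors the patches above.

// Standalone model of the logarithmic compression loop. Assumes a sorted
// input with strictly positive TOFs; the zero/negative-TOF guards added in
// PATCH 10 are deliberately omitted to keep the sketch short.
#include <cstdio>
#include <vector>

struct Event {
  double tof;
  double weight;
};

std::vector<Event> compressLog(const std::vector<Event> &events, const double tolerance) {
  std::vector<Event> out;
  if (events.empty() || tolerance >= 0)
    return out;
  // first boundary past the first event, as the next_bin lambda computes it
  double binEnd = events.front().tof;
  while (events.front().tof >= binEnd)
    binEnd *= (1 - tolerance);
  double totalTof = 0, weight = 0;
  for (const auto &e : events) {
    if (e.tof >= binEnd) { // event falls past this group: emit it and restart
      out.push_back({totalTof / weight, weight});
      totalTof = weight = 0;
      while (e.tof >= binEnd) // grow the boundary geometrically
        binEnd *= (1 - tolerance);
    }
    totalTof += e.tof * e.weight; // weighted running mean of the group
    weight += e.weight;
  }
  out.push_back({totalTof / weight, weight});
  return out;
}

int main() {
  std::vector<Event> events;
  for (double tof = 100; tof < 1600; tof += 1) // uniform data, one event per "microsecond"
    events.push_back({tof, 1.0});
  // a tolerance of -1 doubles each boundary: groups end at 200, 400, 800, 1600,
  // so the weights come out as 100, 200, 400, 800 with centres near 150 * 2^i
  for (const auto &e : compressLog(events, -1.0))
    std::printf("tof %8.2f  weight %5.0f\n", e.tof, e.weight);
  return 0;
}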