diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 06dde87ddd3e8..368ca638b09c3 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -275,6 +275,7 @@ BITCOIN_TESTS =\ test/torcontrol_tests.cpp \ test/transaction_tests.cpp \ test/txindex_tests.cpp \ + test/txrequest_tests.cpp \ test/txvalidation_tests.cpp \ test/txvalidationcache_tests.cpp \ test/uint256_tests.cpp \ diff --git a/src/test/txrequest_tests.cpp b/src/test/txrequest_tests.cpp new file mode 100644 index 0000000000000..46927059aeaeb --- /dev/null +++ b/src/test/txrequest_tests.cpp @@ -0,0 +1,711 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + + +#include +#include + +#include + +#include +#include +#include + +#include + +BOOST_FIXTURE_TEST_SUITE(txrequest_tests, BasicTestingSetup) + +namespace { + +constexpr std::chrono::microseconds MIN_TIME = std::chrono::microseconds::min(); +constexpr std::chrono::microseconds MAX_TIME = std::chrono::microseconds::max(); +constexpr std::chrono::microseconds MICROSECOND = std::chrono::microseconds{1}; +constexpr std::chrono::microseconds NO_TIME = std::chrono::microseconds{0}; + +/** An Action is a function to call at a particular (simulated) timestamp. */ +using Action = std::pair>; + +/** Object that stores actions from multiple interleaved scenarios, and data shared across them. + * + * The Scenario below is used to fill this. + */ +struct Runner +{ + /** The TxRequestTracker being tested. */ + TxRequestTracker txrequest; + + /** List of actions to be executed (in order of increasing timestamp). */ + std::vector actions; + + /** Which node ids have been assigned already (to prevent reuse). */ + std::set peerset; + + /** Which txhashes have been assigned already (to prevent reuse). */ + std::set txhashset; +}; + +std::chrono::microseconds RandomTime8s() { return std::chrono::microseconds{1 + InsecureRandBits(23)}; } +std::chrono::microseconds RandomTime1y() { return std::chrono::microseconds{1 + InsecureRandBits(45)}; } + +/** A proxy for a Runner that helps build a sequence of consecutive test actions on a TxRequestTracker. + * + * Each Scenario is a proxy through which actions for the (sequential) execution of various tests are added to a + * Runner. The actions from multiple scenarios are then run concurrently, resulting in these tests being performed + * against a TxRequestTracker in parallel. Every test has its own unique txhashes and NodeIds which are not + * reused in other tests, and thus they should be independent from each other. Running them in parallel however + * means that we verify the behavior (w.r.t. one test's txhashes and NodeIds) even when the state of the data + * structure is more complicated due to the presence of other tests. + */ +class Scenario +{ + Runner& m_runner; + std::chrono::microseconds m_now; + std::string m_testname; + +public: + Scenario(Runner& runner, std::chrono::microseconds starttime) : m_runner(runner), m_now(starttime) {} + + /** Set a name for the current test, to give more clear error messages. */ + void SetTestName(std::string testname) + { + m_testname = std::move(testname); + } + + /** Advance this Scenario's time; this affects the timestamps newly scheduled events get. */ + void AdvanceTime(std::chrono::microseconds amount) + { + assert(amount.count() >= 0); + m_now += amount; + } + + /** Schedule a ForgetTxHash call at the Scheduler's current time. 
+     */
+    void ForgetTxHash(const uint256& txhash)
+    {
+        auto& runner = m_runner;
+        runner.actions.emplace_back(m_now, [=,&runner]() {
+            runner.txrequest.ForgetTxHash(txhash);
+            runner.txrequest.SanityCheck();
+        });
+    }
+
+    /** Schedule a ReceivedInv call at the Scheduler's current time. */
+    void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool pref, std::chrono::microseconds reqtime)
+    {
+        auto& runner = m_runner;
+        runner.actions.emplace_back(m_now, [=,&runner]() {
+            runner.txrequest.ReceivedInv(peer, gtxid, pref, reqtime);
+            runner.txrequest.SanityCheck();
+        });
+    }
+
+    /** Schedule a DisconnectedPeer call at the Scheduler's current time. */
+    void DisconnectedPeer(NodeId peer)
+    {
+        auto& runner = m_runner;
+        runner.actions.emplace_back(m_now, [=,&runner]() {
+            runner.txrequest.DisconnectedPeer(peer);
+            runner.txrequest.SanityCheck();
+        });
+    }
+
+    /** Schedule a RequestedTx call at the Scheduler's current time. */
+    void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds exptime)
+    {
+        auto& runner = m_runner;
+        runner.actions.emplace_back(m_now, [=,&runner]() {
+            runner.txrequest.RequestedTx(peer, txhash, exptime);
+            runner.txrequest.SanityCheck();
+        });
+    }
+
+    /** Schedule a ReceivedResponse call at the Scheduler's current time. */
+    void ReceivedResponse(NodeId peer, const uint256& txhash)
+    {
+        auto& runner = m_runner;
+        runner.actions.emplace_back(m_now, [=,&runner]() {
+            runner.txrequest.ReceivedResponse(peer, txhash);
+            runner.txrequest.SanityCheck();
+        });
+    }
+
+    /** Schedule calls to verify the TxRequestTracker's state at the Scheduler's current time.
+     *
+     * @param peer       The peer whose state will be inspected.
+     * @param expected   The expected return value of GetRequestable(peer).
+     * @param candidates The expected return value of CountCandidates(peer).
+     * @param inflight   The expected return value of CountInFlight(peer).
+     * @param completed  The expected return value of Count(peer), minus candidates and inflight.
+     * @param checkname  An arbitrary string to include in error messages, for test identification.
+     * @param offset     Offset with the current time to use (must be <= 0). This allows simulations of time going
+     *                   backwards (but note that the ordering of this event only follows the scenario's m_now).
+ */ + void Check(NodeId peer, const std::vector& expected, size_t candidates, size_t inflight, + size_t completed, const std::string& checkname, + std::chrono::microseconds offset = std::chrono::microseconds{0}) + { + const auto comment = m_testname + " " + checkname; + auto& runner = m_runner; + const auto now = m_now; + assert(offset.count() <= 0); + runner.actions.emplace_back(m_now, [=,&runner]() { + auto ret = runner.txrequest.GetRequestable(peer, now + offset); + runner.txrequest.SanityCheck(); + runner.txrequest.PostGetRequestableSanityCheck(now + offset); + size_t total = candidates + inflight + completed; + size_t real_total = runner.txrequest.Count(peer); + size_t real_candidates = runner.txrequest.CountCandidates(peer); + size_t real_inflight = runner.txrequest.CountInFlight(peer); + BOOST_CHECK_MESSAGE(real_total == total, strprintf("[" + comment + "] total %i (%i expected)", real_total, total)); + BOOST_CHECK_MESSAGE(real_inflight == inflight, strprintf("[" + comment + "] inflight %i (%i expected)", real_inflight, inflight)); + BOOST_CHECK_MESSAGE(real_candidates == candidates, strprintf("[" + comment + "] candidates %i (%i expected)", real_candidates, candidates)); + BOOST_CHECK_MESSAGE(ret == expected, "[" + comment + "] mismatching requestables"); + }); + } + + /** Generate a random txhash, whose priorities for certain peers are constrained. + * + * For example, NewTxHash({{p1,p2,p3},{p2,p4,p5}}) will generate a txhash T such that both: + * - priority(p1,T) > priority(p2,T) > priority(p3,T) + * - priority(p2,T) > priority(p4,T) > priority(p5,T) + * where priority is the predicted internal TxRequestTracker's priority, assuming all announcements + * are within the same preferredness class. + */ + uint256 NewTxHash(const std::vector>& orders = {}) + { + uint256 ret; + bool ok; + do { + ret = InsecureRand256(); + ok = true; + for (const auto& order : orders) { + for (size_t pos = 1; pos < order.size(); ++pos) { + uint64_t prio_prev = m_runner.txrequest.ComputePriority(ret, order[pos - 1], true); + uint64_t prio_cur = m_runner.txrequest.ComputePriority(ret, order[pos], true); + if (prio_prev <= prio_cur) { + ok = false; + break; + } + } + if (!ok) break; + } + if (ok) { + ok = m_runner.txhashset.insert(ret).second; + } + } while(!ok); + return ret; + } + + /** Generate a random GenTxid; the txhash follows NewTxHash; the is_wtxid flag is random. */ + GenTxid NewGTxid(const std::vector>& orders = {}) + { + return {InsecureRandBool(), NewTxHash(orders)}; + } + + /** Generate a new random NodeId to use as peer. The same NodeId is never returned twice + * (across all Scenarios combined). */ + NodeId NewPeer() + { + bool ok; + NodeId ret; + do { + ret = InsecureRandBits(63); + ok = m_runner.peerset.insert(ret).second; + } while(!ok); + return ret; + } + + std::chrono::microseconds Now() const { return m_now; } +}; + +/** Add to scenario a test with a single tx announced by a single peer. + * + * config is an integer in [0, 32), which controls which variant of the test is used. + */ +void BuildSingleTest(Scenario& scenario, int config) +{ + auto peer = scenario.NewPeer(); + auto gtxid = scenario.NewGTxid(); + bool immediate = config & 1; + bool preferred = config & 2; + auto delay = immediate ? NO_TIME : RandomTime8s(); + + scenario.SetTestName(strprintf("Single(config=%i)", config)); + + // Receive an announcement, either immediately requestable or delayed. + scenario.ReceivedInv(peer, gtxid, preferred, immediate ? 
MIN_TIME : scenario.Now() + delay); + if (immediate) { + scenario.Check(peer, {gtxid}, 1, 0, 0, "s1"); + } else { + scenario.Check(peer, {}, 1, 0, 0, "s2"); + scenario.AdvanceTime(delay - MICROSECOND); + scenario.Check(peer, {}, 1, 0, 0, "s3"); + scenario.AdvanceTime(MICROSECOND); + scenario.Check(peer, {gtxid}, 1, 0, 0, "s4"); + } + + if (config >> 3) { // We'll request the transaction + scenario.AdvanceTime(RandomTime8s()); + auto expiry = RandomTime8s(); + scenario.Check(peer, {gtxid}, 1, 0, 0, "s5"); + scenario.RequestedTx(peer, gtxid.GetHash(), scenario.Now() + expiry); + scenario.Check(peer, {}, 0, 1, 0, "s6"); + + if ((config >> 3) == 1) { // The request will time out + scenario.AdvanceTime(expiry - MICROSECOND); + scenario.Check(peer, {}, 0, 1, 0, "s7"); + scenario.AdvanceTime(MICROSECOND); + scenario.Check(peer, {}, 0, 0, 0, "s8"); + return; + } else { + scenario.AdvanceTime(std::chrono::microseconds{InsecureRandRange(expiry.count())}); + scenario.Check(peer, {}, 0, 1, 0, "s9"); + if ((config >> 3) == 3) { // A response will arrive for the transaction + scenario.ReceivedResponse(peer, gtxid.GetHash()); + scenario.Check(peer, {}, 0, 0, 0, "s10"); + return; + } + } + } + + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + if (config & 4) { // The peer will go offline + scenario.DisconnectedPeer(peer); + } else { // The transaction is no longer needed + scenario.ForgetTxHash(gtxid.GetHash()); + } + scenario.Check(peer, {}, 0, 0, 0, "s11"); +} + +/** Add to scenario a test with a single tx announced by two peers, to verify the + * right peer is selected for requests. + * + * config is an integer in [0, 32), which controls which variant of the test is used. + */ +void BuildPriorityTest(Scenario& scenario, int config) +{ + scenario.SetTestName(strprintf("Priority(config=%i)", config)); + + // Two peers. They will announce in order {peer1, peer2}. + auto peer1 = scenario.NewPeer(), peer2 = scenario.NewPeer(); + // Construct a transaction that under random rules would be preferred by peer2 or peer1, + // depending on configuration. + bool prio1 = config & 1; + auto gtxid = prio1 ? scenario.NewGTxid({{peer1, peer2}}) : scenario.NewGTxid({{peer2, peer1}}); + bool pref1 = config & 2, pref2 = config & 4; + + scenario.ReceivedInv(peer1, gtxid, pref1, MIN_TIME); + scenario.Check(peer1, {gtxid}, 1, 0, 0, "p1"); + if (InsecureRandBool()) { + scenario.AdvanceTime(RandomTime8s()); + scenario.Check(peer1, {gtxid}, 1, 0, 0, "p2"); + } + + scenario.ReceivedInv(peer2, gtxid, pref2, MIN_TIME); + bool stage2_prio = + // At this point, peer2 will be given priority if: + // - It is preferred and peer1 is not + (pref2 && !pref1) || + // - They're in the same preference class, + // and the randomized priority favors peer2 over peer1. + (pref1 == pref2 && !prio1); + NodeId priopeer = stage2_prio ? peer2 : peer1, otherpeer = stage2_prio ? peer1 : peer2; + scenario.Check(otherpeer, {}, 1, 0, 0, "p3"); + scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p4"); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.Check(otherpeer, {}, 1, 0, 0, "p5"); + scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p6"); + + // We possibly request from the selected peer. 
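// ---------------------------------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of the patch; the test resumes right below). The
// stage2_prio expression above encodes the selection rule this test verifies: the second announcement
// only displaces the first as to-be-requested if it is strictly more preferred, or if it ties on
// preferredness and wins on the randomized priority. A hypothetical helper expressing the same rule,
// assuming both announcements are candidates for the same txhash:
bool SecondAnnouncementWins(bool pref1, bool pref2, bool prio_favors_peer1)
{
    if (pref1 != pref2) return pref2;   // the higher preference class always wins
    return !prio_favors_peer1;          // same class: the randomized priority decides
}
// E.g. SecondAnnouncementWins(pref1, pref2, prio1) yields the same value as stage2_prio above.
// ---------------------------------------------------------------------------------------------------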
+ if (config & 8) { + scenario.RequestedTx(priopeer, gtxid.GetHash(), MAX_TIME); + scenario.Check(priopeer, {}, 0, 1, 0, "p7"); + scenario.Check(otherpeer, {}, 1, 0, 0, "p8"); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + } + + // The peer which was selected (or requested from) now goes offline, or a NOTFOUND is received from them. + if (config & 16) { + scenario.DisconnectedPeer(priopeer); + } else { + scenario.ReceivedResponse(priopeer, gtxid.GetHash()); + } + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.Check(priopeer, {}, 0, 0, !(config & 16), "p8"); + scenario.Check(otherpeer, {gtxid}, 1, 0, 0, "p9"); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + + // Now the other peer goes offline. + scenario.DisconnectedPeer(otherpeer); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.Check(peer1, {}, 0, 0, 0, "p10"); + scenario.Check(peer2, {}, 0, 0, 0, "p11"); +} + +/** Add to scenario a randomized test in which N peers announce the same transaction, to verify + * the order in which they are requested. */ +void BuildBigPriorityTest(Scenario& scenario, int peers) +{ + scenario.SetTestName(strprintf("BigPriority(peers=%i)", peers)); + + // We will have N peers announce the same transaction. + std::map preferred; + std::vector pref_peers, npref_peers; + int num_pref = InsecureRandRange(peers + 1) ; // Some preferred, ... + int num_npref = peers - num_pref; // some not preferred. + for (int i = 0; i < num_pref; ++i) { + pref_peers.push_back(scenario.NewPeer()); + preferred[pref_peers.back()] = true; + } + for (int i = 0; i < num_npref; ++i) { + npref_peers.push_back(scenario.NewPeer()); + preferred[npref_peers.back()] = false; + } + // Make a list of all peers, in order of intended request order (concatenation of pref_peers and npref_peers). + std::vector request_order; + for (int i = 0; i < num_pref; ++i) request_order.push_back(pref_peers[i]); + for (int i = 0; i < num_npref; ++i) request_order.push_back(npref_peers[i]); + + // Determine the announcement order randomly. + std::vector announce_order = request_order; + Shuffle(announce_order.begin(), announce_order.end(), g_insecure_rand_ctx); + + // Find a gtxid whose txhash prioritization is consistent with the required ordering within pref_peers and + // within npref_peers. + auto gtxid = scenario.NewGTxid({pref_peers, npref_peers}); + + // Decide reqtimes in opposite order of the expected request order. This means that as time passes we expect the + // to-be-requested-from-peer will change every time a subsequent reqtime is passed. + std::map reqtimes; + auto reqtime = scenario.Now(); + for (int i = peers - 1; i >= 0; --i) { + reqtime += RandomTime8s(); + reqtimes[request_order[i]] = reqtime; + } + + // Actually announce from all peers simultaneously (but in announce_order). + for (const auto peer : announce_order) { + scenario.ReceivedInv(peer, gtxid, preferred[peer], reqtimes[peer]); + } + for (const auto peer : announce_order) { + scenario.Check(peer, {}, 1, 0, 0, "b1"); + } + + // Let time pass and observe the to-be-requested-from peer change, from nonpreferred to preferred, and from + // high priority to low priority within each class. 
+ for (int i = peers - 1; i >= 0; --i) { + scenario.AdvanceTime(reqtimes[request_order[i]] - scenario.Now() - MICROSECOND); + scenario.Check(request_order[i], {}, 1, 0, 0, "b2"); + scenario.AdvanceTime(MICROSECOND); + scenario.Check(request_order[i], {gtxid}, 1, 0, 0, "b3"); + } + + // Peers now in random order go offline, or send NOTFOUNDs. At every point in time the new to-be-requested-from + // peer should be the best remaining one, so verify this after every response. + for (int i = 0; i < peers; ++i) { + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + const int pos = InsecureRandRange(request_order.size()); + const auto peer = request_order[pos]; + request_order.erase(request_order.begin() + pos); + if (InsecureRandBool()) { + scenario.DisconnectedPeer(peer); + scenario.Check(peer, {}, 0, 0, 0, "b4"); + } else { + scenario.ReceivedResponse(peer, gtxid.GetHash()); + scenario.Check(peer, {}, 0, 0, request_order.size() > 0, "b5"); + } + if (request_order.size()) { + scenario.Check(request_order[0], {gtxid}, 1, 0, 0, "b6"); + } + } + + // Everything is gone in the end. + for (const auto peer : announce_order) { + scenario.Check(peer, {}, 0, 0, 0, "b7"); + } +} + +/** Add to scenario a test with one peer announcing two transactions, to verify they are + * fetched in announcement order. + * + * config is an integer in [0, 4) inclusive, and selects the variant of the test. + */ +void BuildRequestOrderTest(Scenario& scenario, int config) +{ + scenario.SetTestName(strprintf("RequestOrder(config=%i)", config)); + + auto peer = scenario.NewPeer(); + auto gtxid1 = scenario.NewGTxid(); + auto gtxid2 = scenario.NewGTxid(); + + auto reqtime2 = scenario.Now() + RandomTime8s(); + auto reqtime1 = reqtime2 + RandomTime8s(); + + scenario.ReceivedInv(peer, gtxid1, config & 1, reqtime1); + // Simulate time going backwards by giving the second announcement an earlier reqtime. + scenario.ReceivedInv(peer, gtxid2, config & 2, reqtime2); + + scenario.AdvanceTime(reqtime2 - MICROSECOND - scenario.Now()); + scenario.Check(peer, {}, 2, 0, 0, "o1"); + scenario.AdvanceTime(MICROSECOND); + scenario.Check(peer, {gtxid2}, 2, 0, 0, "o2"); + scenario.AdvanceTime(reqtime1 - MICROSECOND - scenario.Now()); + scenario.Check(peer, {gtxid2}, 2, 0, 0, "o3"); + scenario.AdvanceTime(MICROSECOND); + // Even with time going backwards in between announcements, the return value of GetRequestable is in + // announcement order. + scenario.Check(peer, {gtxid1, gtxid2}, 2, 0, 0, "o4"); + + scenario.DisconnectedPeer(peer); + scenario.Check(peer, {}, 0, 0, 0, "o5"); +} + +/** Add to scenario a test that verifies behavior related to both txid and wtxid with the same + * hash being announced. + * + * config is an integer in [0, 4) inclusive, and selects the variant of the test used. +*/ +void BuildWtxidTest(Scenario& scenario, int config) +{ + scenario.SetTestName(strprintf("Wtxid(config=%i)", config)); + + auto peerT = scenario.NewPeer(); + auto peerW = scenario.NewPeer(); + auto txhash = scenario.NewTxHash(); + GenTxid txid{false, txhash}; + GenTxid wtxid{true, txhash}; + + auto reqtimeT = InsecureRandBool() ? MIN_TIME : scenario.Now() + RandomTime8s(); + auto reqtimeW = InsecureRandBool() ? MIN_TIME : scenario.Now() + RandomTime8s(); + + // Announce txid first or wtxid first. 
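// ---------------------------------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of the patch; the announcements follow right below).
// Both peers announce the same underlying hash, but peerT announces it as a txid and peerW as a wtxid;
// GetRequestable must hand back the same flavor the peer used. A minimal illustration with a
// placeholder hash value (assumes <txrequest.h>, <uint256.h> and <cassert> are available):
void WtxidFlavourSketch()
{
    const uint256 hash{};                 // placeholder value, for illustration only
    const GenTxid as_txid{false, hash};   // what peerT announces (is_wtxid == false)
    const GenTxid as_wtxid{true, hash};   // what peerW announces (is_wtxid == true)
    assert(as_txid.GetHash() == as_wtxid.GetHash());  // same txhash, different flavor
}
// The w1..w14 checks below rely on exactly this: the expected GetRequestable results contain {txid}
// for peerT and {wtxid} for peerW, never the other way around.
// ---------------------------------------------------------------------------------------------------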
+ if (config & 1) { + scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW); + } else { + scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT); + } + + // Let time pass if needed, and check that the preferred announcement (txid or wtxid) + // is correctly to-be-requested (and with the correct wtxidness). + auto max_reqtime = std::max(reqtimeT, reqtimeW); + if (max_reqtime > scenario.Now()) scenario.AdvanceTime(max_reqtime - scenario.Now()); + if (config & 2) { + scenario.Check(peerT, {txid}, 1, 0, 0, "w1"); + scenario.Check(peerW, {}, 1, 0, 0, "w2"); + } else { + scenario.Check(peerT, {}, 1, 0, 0, "w3"); + scenario.Check(peerW, {wtxid}, 1, 0, 0, "w4"); + } + + // Let the preferred announcement be requested. It's not going to be delivered. + auto expiry = RandomTime8s(); + if (config & 2) { + scenario.RequestedTx(peerT, txid.GetHash(), scenario.Now() + expiry); + scenario.Check(peerT, {}, 0, 1, 0, "w5"); + scenario.Check(peerW, {}, 1, 0, 0, "w6"); + } else { + scenario.RequestedTx(peerW, wtxid.GetHash(), scenario.Now() + expiry); + scenario.Check(peerT, {}, 1, 0, 0, "w7"); + scenario.Check(peerW, {}, 0, 1, 0, "w8"); + } + + // After reaching expiration time of the preferred announcement, verify that the + // remaining one is requestable + scenario.AdvanceTime(expiry); + if (config & 2) { + scenario.Check(peerT, {}, 0, 0, 1, "w9"); + scenario.Check(peerW, {wtxid}, 1, 0, 0, "w10"); + } else { + scenario.Check(peerT, {txid}, 1, 0, 0, "w11"); + scenario.Check(peerW, {}, 0, 0, 1, "w12"); + } + + // If a good transaction with either that hash as wtxid or txid arrives, both + // announcements are gone. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ForgetTxHash(txhash); + scenario.Check(peerT, {}, 0, 0, 0, "w13"); + scenario.Check(peerW, {}, 0, 0, 0, "w14"); +} + +/** Add to scenario a test that exercises clocks that go backwards. */ +void BuildTimeBackwardsTest(Scenario& scenario) +{ + auto peer1 = scenario.NewPeer(); + auto peer2 = scenario.NewPeer(); + auto gtxid = scenario.NewGTxid({{peer1, peer2}}); + + // Announce from peer2. + auto reqtime = scenario.Now() + RandomTime8s(); + scenario.ReceivedInv(peer2, gtxid, true, reqtime); + scenario.Check(peer2, {}, 1, 0, 0, "r1"); + scenario.AdvanceTime(reqtime - scenario.Now()); + scenario.Check(peer2, {gtxid}, 1, 0, 0, "r2"); + // Check that if the clock goes backwards by 1us, the transaction would stop being requested. + scenario.Check(peer2, {}, 1, 0, 0, "r3", -MICROSECOND); + // But it reverts to being requested if time goes forward again. + scenario.Check(peer2, {gtxid}, 1, 0, 0, "r4"); + + // Announce from peer1. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ReceivedInv(peer1, gtxid, true, MAX_TIME); + scenario.Check(peer2, {gtxid}, 1, 0, 0, "r5"); + scenario.Check(peer1, {}, 1, 0, 0, "r6"); + + // Request from peer1. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + auto expiry = scenario.Now() + RandomTime8s(); + scenario.RequestedTx(peer1, gtxid.GetHash(), expiry); + scenario.Check(peer1, {}, 0, 1, 0, "r7"); + scenario.Check(peer2, {}, 1, 0, 0, "r8"); + + // Expiration passes. 
+ scenario.AdvanceTime(expiry - scenario.Now()); + scenario.Check(peer1, {}, 0, 0, 1, "r9"); + scenario.Check(peer2, {gtxid}, 1, 0, 0, "r10"); // Request goes back to peer2. + scenario.Check(peer1, {}, 0, 0, 1, "r11", -MICROSECOND); // Going back does not unexpire. + scenario.Check(peer2, {gtxid}, 1, 0, 0, "r12", -MICROSECOND); + + // Peer2 goes offline, meaning no viable announcements remain. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.DisconnectedPeer(peer2); + scenario.Check(peer1, {}, 0, 0, 0, "r13"); + scenario.Check(peer2, {}, 0, 0, 0, "r14"); +} + +/** Add to scenario a test that involves RequestedTx() calls for txhashes not returned by GetRequestable. */ +void BuildWeirdRequestsTest(Scenario& scenario) +{ + auto peer1 = scenario.NewPeer(); + auto peer2 = scenario.NewPeer(); + auto gtxid1 = scenario.NewGTxid({{peer1, peer2}}); + auto gtxid2 = scenario.NewGTxid({{peer2, peer1}}); + + // Announce gtxid1 by peer1. + scenario.ReceivedInv(peer1, gtxid1, true, MIN_TIME); + scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q1"); + + // Announce gtxid2 by peer2. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ReceivedInv(peer2, gtxid2, true, MIN_TIME); + scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q2"); + scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q3"); + + // We request gtxid2 from *peer1* - no effect. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME); + scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q4"); + scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q5"); + + // Now request gtxid1 from peer1 - marks it as REQUESTED. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + auto expiryA = scenario.Now() + RandomTime8s(); + scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryA); + scenario.Check(peer1, {}, 0, 1, 0, "q6"); + scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q7"); + + // Request it a second time - nothing happens, as it's already REQUESTED. + auto expiryB = expiryA + RandomTime8s(); + scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryB); + scenario.Check(peer1, {}, 0, 1, 0, "q8"); + scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q9"); + + // Also announce gtxid1 from peer2 now, so that the txhash isn't forgotten when the peer1 request expires. + scenario.ReceivedInv(peer2, gtxid1, true, MIN_TIME); + scenario.Check(peer1, {}, 0, 1, 0, "q10"); + scenario.Check(peer2, {gtxid2}, 2, 0, 0, "q11"); + + // When reaching expiryA, it expires (not expiryB, which is later). + scenario.AdvanceTime(expiryA - scenario.Now()); + scenario.Check(peer1, {}, 0, 0, 1, "q12"); + scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q13"); + + // Requesting it yet again from peer1 doesn't do anything, as it's already COMPLETED. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.RequestedTx(peer1, gtxid1.GetHash(), MAX_TIME); + scenario.Check(peer1, {}, 0, 0, 1, "q14"); + scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q15"); + + // Now announce gtxid2 from peer1. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ReceivedInv(peer1, gtxid2, true, MIN_TIME); + scenario.Check(peer1, {}, 1, 0, 1, "q16"); + scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q17"); + + // And request it from peer1 (weird as peer2 has the preference). 
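// ---------------------------------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of the patch; the q18..q21 checks below are the
// authoritative version). The property exercised next: a txhash has at most one outstanding (REQUESTED)
// announcement, so when the same txhash is requested from a second peer, the earlier request is marked
// COMPLETED. A minimal sketch, with arbitrary illustration times:
void SupersededRequestSketch()
{
    TxRequestTracker tracker(/*deterministic=*/true);
    const NodeId peer1{1}, peer2{2};
    const GenTxid gtxid{false, uint256{}};
    tracker.ReceivedInv(peer1, gtxid, /*preferred=*/true, std::chrono::microseconds{0});
    tracker.ReceivedInv(peer2, gtxid, /*preferred=*/true, std::chrono::microseconds{0});
    (void)tracker.GetRequestable(peer1, std::chrono::microseconds{1});
    (void)tracker.GetRequestable(peer2, std::chrono::microseconds{1});
    tracker.RequestedTx(peer1, gtxid.GetHash(), std::chrono::microseconds{100});
    assert(tracker.CountInFlight(peer1) == 1);
    // Requesting the same txhash from peer2 turns peer1's REQUESTED announcement into COMPLETED.
    tracker.RequestedTx(peer2, gtxid.GetHash(), std::chrono::microseconds{100});
    assert(tracker.CountInFlight(peer1) == 0);
    assert(tracker.CountInFlight(peer2) == 1);
    tracker.SanityCheck();
}
// ---------------------------------------------------------------------------------------------------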
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME); + scenario.Check(peer1, {}, 0, 1, 1, "q18"); + scenario.Check(peer2, {gtxid1}, 2, 0, 0, "q19"); + + // If peer2 now (normally) requests gtxid2, the existing request by peer1 becomes COMPLETED. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.RequestedTx(peer2, gtxid2.GetHash(), MAX_TIME); + scenario.Check(peer1, {}, 0, 0, 2, "q20"); + scenario.Check(peer2, {gtxid1}, 1, 1, 0, "q21"); + + // If peer2 goes offline, no viable announcements remain. + scenario.DisconnectedPeer(peer2); + scenario.Check(peer1, {}, 0, 0, 0, "q22"); + scenario.Check(peer2, {}, 0, 0, 0, "q23"); +} + +void TestInterleavedScenarios() +{ + // Create a list of functions which add tests to scenarios. + std::vector> builders; + // Add instances of every test, for every configuration. + for (int n = 0; n < 64; ++n) { + builders.emplace_back([n](Scenario& scenario){ BuildWtxidTest(scenario, n); }); + builders.emplace_back([n](Scenario& scenario){ BuildRequestOrderTest(scenario, n & 3); }); + builders.emplace_back([n](Scenario& scenario){ BuildSingleTest(scenario, n & 31); }); + builders.emplace_back([n](Scenario& scenario){ BuildPriorityTest(scenario, n & 31); }); + builders.emplace_back([n](Scenario& scenario){ BuildBigPriorityTest(scenario, (n & 7) + 1); }); + builders.emplace_back([](Scenario& scenario){ BuildTimeBackwardsTest(scenario); }); + builders.emplace_back([](Scenario& scenario){ BuildWeirdRequestsTest(scenario); }); + } + // Randomly shuffle all those functions. + Shuffle(builders.begin(), builders.end(), g_insecure_rand_ctx); + + Runner runner; + auto starttime = RandomTime1y(); + // Construct many scenarios, and run (up to) 10 randomly-chosen tests consecutively in each. + while (builders.size()) { + // Introduce some variation in the start time of each scenario, so they don't all start off + // concurrently, but get a more random interleaving. + auto scenario_start = starttime + RandomTime8s() + RandomTime8s() + RandomTime8s(); + Scenario scenario(runner, scenario_start); + for (int j = 0; builders.size() && j < 10; ++j) { + builders.back()(scenario); + builders.pop_back(); + } + } + // Sort all the actions from all those scenarios chronologically, resulting in the actions from + // distinct scenarios to become interleaved. Use stable_sort so that actions from one scenario + // aren't reordered w.r.t. each other. + std::stable_sort(runner.actions.begin(), runner.actions.end(), [](const Action& a1, const Action& a2) { + return a1.first < a2.first; + }); + + // Run all actions from all scenarios, in order. + for (auto& action : runner.actions) { + action.second(); + } + + BOOST_CHECK_EQUAL(runner.txrequest.Size(), 0U); +} + +} // namespace + +BOOST_AUTO_TEST_CASE(TxRequestTest) +{ + for (int i = 0; i < 5; ++i) { + TestInterleavedScenarios(); + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/txrequest.cpp b/src/txrequest.cpp index bb64a00847d57..cabdc6395717f 100644 --- a/src/txrequest.cpp +++ b/src/txrequest.cpp @@ -228,6 +228,69 @@ struct PeerInfo { size_t m_requested = 0; //!< Number of REQUESTED announcements for this peer. }; +/** Per-txhash statistics object. Only used for sanity checking. */ +struct TxHashInfo +{ + //! Number of CANDIDATE_DELAYED announcements for this txhash. + size_t m_candidate_delayed = 0; + //! Number of CANDIDATE_READY announcements for this txhash. + size_t m_candidate_ready = 0; + //! 
Number of CANDIDATE_BEST announcements for this txhash (at most one).
+    size_t m_candidate_best = 0;
+    //! Number of REQUESTED announcements for this txhash (at most one; mutually exclusive with CANDIDATE_BEST).
+    size_t m_requested = 0;
+    //! The priority of the CANDIDATE_BEST announcement if one exists, or max() otherwise.
+    Priority m_priority_candidate_best = std::numeric_limits<Priority>::max();
+    //! The highest priority of all CANDIDATE_READY announcements (or min() if none exist).
+    Priority m_priority_best_candidate_ready = std::numeric_limits<Priority>::min();
+    //! All peers we have an announcement for this txhash for.
+    std::vector<NodeId> m_peers;
+};
+
+/** Compare two PeerInfo objects. Only used for sanity checking. */
+bool operator==(const PeerInfo& a, const PeerInfo& b)
+{
+    return std::tie(a.m_total, a.m_completed, a.m_requested) ==
+           std::tie(b.m_total, b.m_completed, b.m_requested);
+};
+
+/** (Re)compute the PeerInfo map from the index. Only used for sanity checking. */
+std::unordered_map<NodeId, PeerInfo> RecomputePeerInfo(const Index& index)
+{
+    std::unordered_map<NodeId, PeerInfo> ret;
+    for (const Announcement& ann : index) {
+        PeerInfo& info = ret[ann.m_peer];
+        ++info.m_total;
+        info.m_requested += (ann.m_state == State::REQUESTED);
+        info.m_completed += (ann.m_state == State::COMPLETED);
+    }
+    return ret;
+}
+
+/** Compute the TxHashInfo map. Only used for sanity checking. */
+std::map<uint256, TxHashInfo> ComputeTxHashInfo(const Index& index, const PriorityComputer& computer)
+{
+    std::map<uint256, TxHashInfo> ret;
+    for (const Announcement& ann : index) {
+        TxHashInfo& info = ret[ann.m_txhash];
+        // Classify how many announcements of each state we have for this txhash.
+        info.m_candidate_delayed += (ann.m_state == State::CANDIDATE_DELAYED);
+        info.m_candidate_ready += (ann.m_state == State::CANDIDATE_READY);
+        info.m_candidate_best += (ann.m_state == State::CANDIDATE_BEST);
+        info.m_requested += (ann.m_state == State::REQUESTED);
+        // And track the priority of the best CANDIDATE_READY/CANDIDATE_BEST announcements.
+        if (ann.m_state == State::CANDIDATE_BEST) {
+            info.m_priority_candidate_best = computer(ann);
+        }
+        if (ann.m_state == State::CANDIDATE_READY) {
+            info.m_priority_best_candidate_ready = std::max(info.m_priority_best_candidate_ready, computer(ann));
+        }
+        // Also keep track of which peers this txhash has an announcement for (so we can detect duplicates).
+        info.m_peers.push_back(ann.m_peer);
+    }
+    return ret;
+}
+
 } // namespace
 
 /** Actual implementation for TxRequestTracker's data structure. */
@@ -239,12 +302,63 @@ class TxRequestTracker::Impl {
     //! This tracker's priority computer.
     const PriorityComputer m_computer;
 
-    //! This tracker's main data structure.
+    //! This tracker's main data structure. See SanityCheck() for the invariants that apply to it.
     Index m_index;
 
     //! Map with this tracker's per-peer statistics.
     std::unordered_map<NodeId, PeerInfo> m_peerinfo;
 
+public:
+    void SanityCheck() const
+    {
+        // Recompute m_peerinfo from m_index. This verifies the data in it, as it should just be caching
+        // statistics on m_index. It also verifies the invariant that no PeerInfo entries with m_total==0 exist.
+        assert(m_peerinfo == RecomputePeerInfo(m_index));
+
+        // Calculate per-txhash statistics from m_index, and validate invariants.
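// ---------------------------------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of the patch; the loop below performs the actual
// assertions). One of the invariants validated there (at most one announcement per txhash in
// CANDIDATE_BEST or REQUESTED state) is observable through the public interface: when several peers
// announce the same txhash, GetRequestable offers it from exactly one of them at a time. A minimal
// sketch of that, with arbitrary illustration values:
void SingleBestSketch()
{
    TxRequestTracker tracker(/*deterministic=*/true);
    const NodeId alice{1}, bob{2};
    const GenTxid gtxid{true, uint256{}};
    tracker.ReceivedInv(alice, gtxid, /*preferred=*/true, std::chrono::microseconds{0});
    tracker.ReceivedInv(bob, gtxid, /*preferred=*/true, std::chrono::microseconds{0});
    const auto now = std::chrono::microseconds{1};
    const auto from_alice = tracker.GetRequestable(alice, now);
    const auto from_bob = tracker.GetRequestable(bob, now);
    // Exactly one of the two peers is selected; which one depends on the randomized priority.
    assert(from_alice.size() + from_bob.size() == 1);
    tracker.SanityCheck();
    tracker.PostGetRequestableSanityCheck(now);
}
// The per-txhash statistics computed above feed the assertions that follow.
// ---------------------------------------------------------------------------------------------------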
+ for (auto& item : ComputeTxHashInfo(m_index, m_computer)) { + TxHashInfo& info = item.second; + + // Cannot have only COMPLETED peer (txhash should have been forgotten already) + assert(info.m_candidate_delayed + info.m_candidate_ready + info.m_candidate_best + info.m_requested > 0); + + // Can have at most 1 CANDIDATE_BEST/REQUESTED peer + assert(info.m_candidate_best + info.m_requested <= 1); + + // If there are any CANDIDATE_READY announcements, there must be exactly one CANDIDATE_BEST or REQUESTED + // announcement. + if (info.m_candidate_ready > 0) { + assert(info.m_candidate_best + info.m_requested == 1); + } + + // If there is both a CANDIDATE_READY and a CANDIDATE_BEST announcement, the CANDIDATE_BEST one must be + // at least as good (equal or higher priority) as the best CANDIDATE_READY. + if (info.m_candidate_ready && info.m_candidate_best) { + assert(info.m_priority_candidate_best >= info.m_priority_best_candidate_ready); + } + + // No txhash can have been announced by the same peer twice. + std::sort(info.m_peers.begin(), info.m_peers.end()); + assert(std::adjacent_find(info.m_peers.begin(), info.m_peers.end()) == info.m_peers.end()); + } + } + + void PostGetRequestableSanityCheck(std::chrono::microseconds now) const + { + for (const Announcement& ann : m_index) { + if (ann.IsWaiting()) { + // REQUESTED and CANDIDATE_DELAYED must have a time in the future (they should have been converted + // to COMPLETED/CANDIDATE_READY respectively). + assert(ann.m_time > now); + } else if (ann.IsSelectable()) { + // CANDIDATE_READY and CANDIDATE_BEST cannot have a time in the future (they should have remained + // CANDIDATE_DELAYED, or should have been converted back to it if time went backwards). + assert(ann.m_time <= now); + } + } + } + +private: //! Wrapper around Index::...::erase that keeps m_peerinfo up to date. template Iter Erase(Iter it) @@ -570,6 +684,13 @@ class TxRequestTracker::Impl { //! Count how many announcements are being tracked in total across all peers and transactions. size_t Size() const { return m_index.size(); } + + uint64_t ComputePriority(const uint256& txhash, NodeId peer, bool preferred) const + { + // Return Priority as a uint64_t as Priority is internal. 
+        return uint64_t{m_computer(txhash, peer, preferred)};
+    }
+
 };
 
 TxRequestTracker::TxRequestTracker(bool deterministic) :
@@ -583,6 +704,12 @@ size_t TxRequestTracker::CountInFlight(NodeId peer) const { return m_impl->Count
 size_t TxRequestTracker::CountCandidates(NodeId peer) const { return m_impl->CountCandidates(peer); }
 size_t TxRequestTracker::Count(NodeId peer) const { return m_impl->Count(peer); }
 size_t TxRequestTracker::Size() const { return m_impl->Size(); }
+void TxRequestTracker::SanityCheck() const { m_impl->SanityCheck(); }
+
+void TxRequestTracker::PostGetRequestableSanityCheck(std::chrono::microseconds now) const
+{
+    m_impl->PostGetRequestableSanityCheck(now);
+}
 
 void TxRequestTracker::ReceivedInv(NodeId peer, const GenTxid& gtxid, bool preferred,
     std::chrono::microseconds reqtime)
@@ -604,3 +731,8 @@ std::vector<GenTxid> TxRequestTracker::GetRequestable(NodeId peer, std::chrono::
 {
     return m_impl->GetRequestable(peer, now);
 }
+
+uint64_t TxRequestTracker::ComputePriority(const uint256& txhash, NodeId peer, bool preferred) const
+{
+    return m_impl->ComputePriority(txhash, peer, preferred);
+}
diff --git a/src/txrequest.h b/src/txrequest.h
index e24390b6aae37..a434901a5282d 100644
--- a/src/txrequest.h
+++ b/src/txrequest.h
@@ -192,6 +192,18 @@ class TxRequestTracker {
     /** Count how many announcements are being tracked in total across all peers and transaction hashes. */
     size_t Size() const;
 
+    /** Access to the internal priority computation (testing only) */
+    uint64_t ComputePriority(const uint256& txhash, NodeId peer, bool preferred) const;
+
+    /** Run internal consistency check (testing only). */
+    void SanityCheck() const;
+
+    /** Run a time-dependent internal consistency check (testing only).
+     *
+     * This can only be called immediately after GetRequestable, with the same 'now' parameter.
+     */
+    void PostGetRequestableSanityCheck(std::chrono::microseconds now) const;
 };
 
 #endif // BITCOIN_TXREQUEST_H
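// ---------------------------------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of the patch). The ComputePriority() hook added above
// exists so the tests can construct txhashes with a known peer ordering: NewTxHash() simply retries
// random hashes until the requested orderings hold. A condensed sketch of that rejection-sampling idea
// for two peers (GetRandHash() stands in for the test framework's InsecureRand256(); that substitution
// is an assumption made only for this illustration):
uint256 HashPreferringPeer(const TxRequestTracker& tracker, NodeId peer_hi, NodeId peer_lo)
{
    uint256 txhash;
    do {
        txhash = GetRandHash();
    } while (tracker.ComputePriority(txhash, peer_hi, /*preferred=*/true) <=
             tracker.ComputePriority(txhash, peer_lo, /*preferred=*/true));
    return txhash;  // peer_hi now has strictly higher priority than peer_lo for this txhash
}
// ---------------------------------------------------------------------------------------------------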
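// ---------------------------------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of the patch). The scheduling machinery used by the
// tests above boils down to: each Scenario appends (timestamp, closure) pairs at its own monotonically
// increasing simulated time, and the Runner stable-sorts all pairs by timestamp before executing them,
// so actions from different scenarios interleave while each scenario's own actions keep their relative
// order. A stripped-down, self-contained version of that pattern ('MiniRunner' is illustration-only):
#include <algorithm>
#include <chrono>
#include <functional>
#include <utility>
#include <vector>

struct MiniRunner
{
    using Action = std::pair<std::chrono::microseconds, std::function<void()>>;
    std::vector<Action> actions;

    void Schedule(std::chrono::microseconds when, std::function<void()> what)
    {
        actions.emplace_back(when, std::move(what));
    }

    void Run()
    {
        // stable_sort keeps same-timestamp actions in insertion order, as in the real Runner.
        std::stable_sort(actions.begin(), actions.end(),
                         [](const Action& a, const Action& b) { return a.first < b.first; });
        for (const auto& action : actions) action.second();
    }
};
// ---------------------------------------------------------------------------------------------------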