diff --git a/qa/rpc-tests/autoix-mempool.py b/qa/rpc-tests/autoix-mempool.py
index 0820afa3eadc1..5bd9ed023f4b5 100755
--- a/qa/rpc-tests/autoix-mempool.py
+++ b/qa/rpc-tests/autoix-mempool.py
@@ -23,10 +23,10 @@ class AutoIXMempoolTest(DashTestFramework):
     def __init__(self):
-        super().__init__(13, 10, ["-maxmempool=%d" % MAX_MEMPOOL_SIZE, '-limitdescendantsize=10'], fast_dip3_enforcement=True)
+        super().__init__(8, 5, ["-maxmempool=%d" % MAX_MEMPOOL_SIZE, '-limitdescendantsize=10'], fast_dip3_enforcement=True)
         # set sender, receiver
-        self.receiver_idx = self.num_nodes - 2
-        self.sender_idx = self.num_nodes - 3
+        self.receiver_idx = 1
+        self.sender_idx = 2

     def get_autoix_bip9_status(self):
         info = self.nodes[0].getblockchaininfo()
diff --git a/qa/rpc-tests/llmq-chainlocks.py b/qa/rpc-tests/llmq-chainlocks.py
index 1ce0e4fbd5a5d..6cbcc6cdc108a 100755
--- a/qa/rpc-tests/llmq-chainlocks.py
+++ b/qa/rpc-tests/llmq-chainlocks.py
@@ -17,7 +17,7 @@ class LLMQChainLocksTest(DashTestFramework):
     def __init__(self):
-        super().__init__(11, 10, [], fast_dip3_enforcement=True)
+        super().__init__(6, 5, [], fast_dip3_enforcement=True)

     def run_test(self):
diff --git a/qa/rpc-tests/llmq-signing.py b/qa/rpc-tests/llmq-signing.py
index b84986957d53d..02e9637988b1e 100755
--- a/qa/rpc-tests/llmq-signing.py
+++ b/qa/rpc-tests/llmq-signing.py
@@ -17,7 +17,7 @@ class LLMQSigningTest(DashTestFramework):
     def __init__(self):
-        super().__init__(11, 10, [], fast_dip3_enforcement=True)
+        super().__init__(6, 5, [], fast_dip3_enforcement=True)

     def run_test(self):
@@ -57,13 +57,13 @@ def assert_sigs_nochange(hasrecsigs, isconflicting1, isconflicting2, timeout):
         # Initial state
         wait_for_sigs(False, False, False, 1)

-        # Sign 5 shares, should not result in recovered sig
-        for i in range(5):
+        # Sign 2 shares, should not result in recovered sig
+        for i in range(2):
             self.mninfo[i].node.quorum("sign", 100, id, msgHash)
         assert_sigs_nochange(False, False, False, 3)

         # Sign one more share, should result in recovered sig and conflict for msgHashConflict
-        self.mninfo[6].node.quorum("sign", 100, id, msgHash)
+        self.mninfo[2].node.quorum("sign", 100, id, msgHash)
         wait_for_sigs(True, False, True, 15)

         # Mine one more quorum, so that we have 2 active ones, nothing should change
@@ -86,9 +86,9 @@ def assert_sigs_nochange(hasrecsigs, isconflicting1, isconflicting2, timeout):
         # Cleanup starts every 5 seconds
         wait_for_sigs(False, False, False, 15)

-        for i in range(4):
+        for i in range(2):
             self.mninfo[i].node.quorum("sign", 100, id, msgHashConflict)
-        for i in range(4, 10):
+        for i in range(2, 5):
             self.mninfo[i].node.quorum("sign", 100, id, msgHash)
         wait_for_sigs(True, False, True, 15)
diff --git a/qa/rpc-tests/llmq-simplepose.py b/qa/rpc-tests/llmq-simplepose.py
index 81b234fa5fdb3..75d48e2be01e5 100755
--- a/qa/rpc-tests/llmq-simplepose.py
+++ b/qa/rpc-tests/llmq-simplepose.py
@@ -16,7 +16,7 @@ class LLMQSimplePoSeTest(DashTestFramework):
     def __init__(self):
-        super().__init__(11, 10, [], fast_dip3_enforcement=True)
+        super().__init__(6, 5, [], fast_dip3_enforcement=True)

     def run_test(self):
@@ -25,7 +25,7 @@ def run_test(self):
         # check if mining quorums with all nodes being online succeeds without punishment/banning
         for i in range(3):
-            self.mine_quorum(expected_valid_count=10)
+            self.mine_quorum()

         for mn in self.mninfo:
             assert(not self.check_punished(mn) and not self.check_punished(mn))
diff --git a/qa/rpc-tests/p2p-autoinstantsend.py b/qa/rpc-tests/p2p-autoinstantsend.py
index 4ffc088841b7b..fb2910a313dd8 100755
--- a/qa/rpc-tests/p2p-autoinstantsend.py
+++ b/qa/rpc-tests/p2p-autoinstantsend.py
@@ -23,11 +23,10 @@ class AutoInstantSendTest(DashTestFramework):
     def __init__(self):
-        super().__init__(14, 10, [], fast_dip3_enforcement=True)
+        super().__init__(8, 5, [], fast_dip3_enforcement=True)
         # set sender, receiver, isolated nodes
-        self.isolated_idx = self.num_nodes - 1
-        self.receiver_idx = self.num_nodes - 2
-        self.sender_idx = self.num_nodes - 3
+        self.receiver_idx = 1
+        self.sender_idx = 2

     def get_autoix_bip9_status(self):
         info = self.nodes[0].getblockchaininfo()
diff --git a/qa/rpc-tests/p2p-instantsend.py b/qa/rpc-tests/p2p-instantsend.py
index b5fee665762fd..89841d0c04cc4 100755
--- a/qa/rpc-tests/p2p-instantsend.py
+++ b/qa/rpc-tests/p2p-instantsend.py
@@ -14,11 +14,11 @@ class InstantSendTest(DashTestFramework):
     def __init__(self):
-        super().__init__(14, 10, [], fast_dip3_enforcement=True)
+        super().__init__(9, 5, [], fast_dip3_enforcement=True)
         # set sender, receiver, isolated nodes
-        self.isolated_idx = self.num_nodes - 1
-        self.receiver_idx = self.num_nodes - 2
-        self.sender_idx = self.num_nodes - 3
+        self.isolated_idx = 1
+        self.receiver_idx = 2
+        self.sender_idx = 3

     def run_test(self):
         self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
@@ -50,7 +50,7 @@ def test_doublespend(self):
                                    self.nodes[self.isolated_idx], 0.5, 1, 100)
         # stop one node to isolate it from network
-        stop_node(self.nodes[self.isolated_idx], self.isolated_idx)
+        self.nodes[self.isolated_idx].setnetworkactive(False)
         # instantsend to receiver
         receiver_addr = self.nodes[self.receiver_idx].getnewaddress()
         is_id = self.nodes[self.sender_idx].instantsendtoaddress(receiver_addr, 0.9)
@@ -66,10 +66,6 @@ def test_doublespend(self):
                 break
             sleep(0.1)
         assert(locked)
-        # start last node
-        self.nodes[self.isolated_idx] = start_node(self.isolated_idx,
-                                                   self.options.tmpdir,
-                                                   self.extra_args)
         # send doublespend transaction to isolated node
         self.nodes[self.isolated_idx].sendrawtransaction(dblspnd_tx['hex'])
         # generate block on isolated node with doublespend transaction
@@ -78,11 +74,14 @@ def test_doublespend(self):
         self.nodes[self.isolated_idx].generate(1)
         wrong_block = self.nodes[self.isolated_idx].getbestblockhash()
         # connect isolated block to network
+        self.nodes[self.isolated_idx].setnetworkactive(True)
         for i in range(0, self.isolated_idx):
             connect_nodes(self.nodes[i], self.isolated_idx)
         # check doublespend block is rejected by other nodes
         timeout = 10
-        for i in range(0, self.isolated_idx):
+        for i in range(0, self.num_nodes):
+            if i == self.isolated_idx:
+                continue
             res = self.nodes[i].waitforblock(wrong_block, timeout)
             assert (res['hash'] != wrong_block)
         # wait for long time only for first node
diff --git a/qa/rpc-tests/test_framework/test_framework.py b/qa/rpc-tests/test_framework/test_framework.py
index 0d07b87ed57c5..7c6b9b65e11f6 100755
--- a/qa/rpc-tests/test_framework/test_framework.py
+++ b/qa/rpc-tests/test_framework/test_framework.py
@@ -479,10 +479,10 @@ def wait_for_quorum_phase(self, phase, check_received_messages, check_received_m
             all_ok = True
             for mn in self.mninfo:
                 s = mn.node.quorum("dkgstatus")["session"]
-                if "llmq_10" not in s:
+                if "llmq_5_60" not in s:
                     all_ok = False
                     break
-                s = s["llmq_10"]
+                s = s["llmq_5_60"]
                 if "phase" not in s:
                     all_ok = False
                     break
@@ -508,7 +508,7 @@ def wait_for_quorum_commitment(self, timeout = 15):
                     all_ok = False
                     break
                 s = s["minableCommitments"]
-                if "llmq_10" not in s:
+                if "llmq_5_60" not in s:
                     all_ok = False
                     break
             if all_ok:
@@ -516,7 +516,7 @@ def wait_for_quorum_commitment(self, timeout = 15):
             sleep(0.1)
         raise AssertionError("wait_for_quorum_commitment timed out")

-    def mine_quorum(self, expected_valid_count=10):
+    def mine_quorum(self, expected_valid_count=5):
         quorums = self.nodes[0].quorum("list")

         # move forward to next DKG
@@ -544,7 +544,7 @@ def mine_quorum(self, expected_valid_count=10):
         sync_blocks(self.nodes)

         # Make sure all reached phase 3 (complain) and received all complaints
-        self.wait_for_quorum_phase(3, "receivedComplaints" if expected_valid_count != 10 else None, expected_valid_count)
+        self.wait_for_quorum_phase(3, "receivedComplaints" if expected_valid_count != 5 else None, expected_valid_count)
         set_mocktime(get_mocktime() + 1)
         set_node_times(self.nodes, get_mocktime())
         self.nodes[0].generate(2)
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 8d850068c2c10..c9c0976d87a6e 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -107,12 +107,12 @@ static CBlock FindDevNetGenesisBlock(const Consensus::Params& params, const CBlo
 }

 // this one is for testing only
-static Consensus::LLMQParams llmq10_60 = {
-        .type = Consensus::LLMQ_10_60,
-        .name = "llmq_10",
-        .size = 10,
-        .minSize = 6,
-        .threshold = 6,
+static Consensus::LLMQParams llmq5_60 = {
+        .type = Consensus::LLMQ_5_60,
+        .name = "llmq_5_60",
+        .size = 5,
+        .minSize = 3,
+        .threshold = 3,

         .dkgInterval = 24, // one DKG per hour
         .dkgPhaseBlocks = 2,
@@ -203,6 +203,8 @@ class CMainParams : public CChainParams {
         consensus.nMasternodePaymentsIncreasePeriod = 576*30; // 17280 - actual historical value
         consensus.nInstantSendConfirmationsRequired = 6;
         consensus.nInstantSendKeepLock = 24;
+        consensus.nInstantSendSigsRequired = 6;
+        consensus.nInstantSendSigsTotal = 10;
         consensus.nBudgetPaymentsStartBlock = 328008; // actual historical value
         consensus.nBudgetPaymentsCycleBlocks = 16616; // ~(60*24*30)/2.6, actual number of blocks per month is 200700 / 12 = 16725
         consensus.nBudgetPaymentsWindowBlocks = 100;
@@ -376,6 +378,8 @@ class CTestNetParams : public CChainParams {
         consensus.nMasternodePaymentsIncreasePeriod = 10;
         consensus.nInstantSendConfirmationsRequired = 2;
         consensus.nInstantSendKeepLock = 6;
+        consensus.nInstantSendSigsRequired = 6;
+        consensus.nInstantSendSigsTotal = 10;
         consensus.nBudgetPaymentsStartBlock = 4100;
         consensus.nBudgetPaymentsCycleBlocks = 50;
         consensus.nBudgetPaymentsWindowBlocks = 10;
@@ -526,6 +530,8 @@ class CDevNetParams : public CChainParams {
         consensus.nMasternodePaymentsIncreasePeriod = 10;
         consensus.nInstantSendConfirmationsRequired = 2;
         consensus.nInstantSendKeepLock = 6;
+        consensus.nInstantSendSigsRequired = 6;
+        consensus.nInstantSendSigsTotal = 10;
         consensus.nBudgetPaymentsStartBlock = 4100;
         consensus.nBudgetPaymentsCycleBlocks = 50;
         consensus.nBudgetPaymentsWindowBlocks = 10;
@@ -683,6 +689,8 @@ class CRegTestParams : public CChainParams {
         consensus.nMasternodePaymentsIncreasePeriod = 10;
         consensus.nInstantSendConfirmationsRequired = 2;
         consensus.nInstantSendKeepLock = 6;
+        consensus.nInstantSendSigsRequired = 3;
+        consensus.nInstantSendSigsTotal = 5;
         consensus.nBudgetPaymentsStartBlock = 1000;
         consensus.nBudgetPaymentsCycleBlocks = 50;
         consensus.nBudgetPaymentsWindowBlocks = 10;
@@ -787,10 +795,10 @@ class CRegTestParams : public CChainParams {
         nExtCoinType = 1;

         // long living quorum params
-        consensus.llmqs[Consensus::LLMQ_10_60] = llmq10_60;
+        consensus.llmqs[Consensus::LLMQ_5_60] = llmq5_60;
         consensus.llmqs[Consensus::LLMQ_50_60] = llmq50_60;
-        consensus.llmqChainLocks = Consensus::LLMQ_10_60;
-        consensus.llmqForInstantSend = Consensus::LLMQ_10_60;
+        consensus.llmqChainLocks = Consensus::LLMQ_5_60;
+        consensus.llmqForInstantSend = Consensus::LLMQ_5_60;
     }

     void UpdateBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout, int64_t nWindowSize, int64_t nThreshold)
diff --git a/src/consensus/params.h b/src/consensus/params.h
index b2efd4ca755c6..fe06fc4cf6a0e 100644
--- a/src/consensus/params.h
+++ b/src/consensus/params.h
@@ -48,7 +48,7 @@ enum LLMQType : uint8_t
     LLMQ_400_85 = 3, // 400 members, 340 (85%) threshold, one every 24 hours

     // for testing only
-    LLMQ_10_60 = 100, // 10 members, 6 (60%) threshold, one per hour
+    LLMQ_5_60 = 100, // 5 members, 3 (60%) threshold, one per hour
 };

 // Configures a LLMQ and its DKG
@@ -124,6 +124,8 @@ struct Params {
     int nMasternodePaymentsIncreasePeriod; // in blocks
     int nInstantSendConfirmationsRequired; // in blocks
     int nInstantSendKeepLock; // in blocks
+    int nInstantSendSigsRequired;
+    int nInstantSendSigsTotal;
     int nBudgetPaymentsStartBlock;
     int nBudgetPaymentsCycleBlocks;
     int nBudgetPaymentsWindowBlocks;
diff --git a/src/instantx.cpp b/src/instantx.cpp
index 87bbd1a5b6bf1..2b017ddb5581c 100644
--- a/src/instantx.cpp
+++ b/src/instantx.cpp
@@ -47,8 +47,8 @@ const std::string CInstantSend::SERIALIZATION_VERSION_STRING = "CInstantSend-Ver
 // Transaction Locks
 //
 // step 1) Some node announces intention to lock transaction inputs via "txlockrequest" message (ix)
-// step 2) Top COutPointLock::SIGNATURES_TOTAL masternodes per each spent outpoint push "txlockvote" message (txlvote)
-// step 3) Once there are COutPointLock::SIGNATURES_REQUIRED valid "txlockvote" messages (txlvote) per each spent outpoint
+// step 2) Top nInstantSendSigsTotal masternodes per each spent outpoint push "txlockvote" message (txlvote)
+// step 3) Once there are nInstantSendSigsRequired valid "txlockvote" messages (txlvote) per each spent outpoint
 //         for a corresponding "txlockrequest" message (ix), all outpoints from that tx are treated as locked
 //
@@ -252,7 +252,7 @@ void CInstantSend::Vote(CTxLockCandidate& txLockCandidate, CConnman& connman)
             continue;
         }

-        int nSignaturesTotal = COutPointLock::SIGNATURES_TOTAL;
+        int nSignaturesTotal = Params().GetConsensus().nInstantSendSigsTotal;
         if (nRank > nSignaturesTotal) {
             LogPrint("instantsend", "CInstantSend::Vote -- Masternode not in the top %d (%d)\n", nSignaturesTotal, nRank);
             continue;
@@ -1025,7 +1025,7 @@ CAmount CTxLockRequest::GetMinFee(bool fForceMinFee) const

 int CTxLockRequest::GetMaxSignatures() const
 {
-    return tx->vin.size() * COutPointLock::SIGNATURES_TOTAL;
+    return tx->vin.size() * Params().GetConsensus().nInstantSendSigsTotal;
 }

 bool CTxLockRequest::IsSimple() const
@@ -1087,7 +1087,7 @@ bool CTxLockVote::IsValid(CNode* pnode, CConnman& connman) const
     LogPrint("instantsend", "CTxLockVote::IsValid -- Masternode %s, rank=%d\n", outpointMasternode.ToStringShort(), nRank);

-    int nSignaturesTotal = COutPointLock::SIGNATURES_TOTAL;
+    int nSignaturesTotal = Params().GetConsensus().nInstantSendSigsTotal;
     if (nRank > nSignaturesTotal) {
         LogPrint("instantsend", "CTxLockVote::IsValid -- Masternode %s is not in the top %d (%d), vote hash=%s\n",
                 outpointMasternode.ToStringShort(), nSignaturesTotal, nRank, GetHash().ToString());
@@ -1196,6 +1196,11 @@ bool COutPointLock::HasMasternodeVoted(const COutPoint& outpointMasternodeIn) co
     return mapMasternodeVotes.count(outpointMasternodeIn);
 }

+bool COutPointLock::IsReady() const
+{
+    return !fAttacked && CountVotes() >= Params().GetConsensus().nInstantSendSigsRequired;
+}
+
 void COutPointLock::Relay(CConnman& connman) const
 {
     for (const auto& pair : mapMasternodeVotes) {
diff --git a/src/instantx.h b/src/instantx.h
index aac54e2d1acfc..e7a14844aca7d 100644
--- a/src/instantx.h
+++ b/src/instantx.h
@@ -310,9 +310,6 @@ class COutPointLock
     bool fAttacked = false;

 public:
-    static const int SIGNATURES_REQUIRED = 6;
-    static const int SIGNATURES_TOTAL = 10;
-
     COutPointLock() {}

     COutPointLock(const COutPoint& outpointIn) :
@@ -335,7 +332,7 @@ class COutPointLock
     std::vector<CTxLockVote> GetVotes() const;
     bool HasMasternodeVoted(const COutPoint& outpointMasternodeIn) const;
     int CountVotes() const { return fAttacked ? 0 : mapMasternodeVotes.size(); }
-    bool IsReady() const { return !fAttacked && CountVotes() >= SIGNATURES_REQUIRED; }
+    bool IsReady() const;
     void MarkAsAttacked() { fAttacked = true; }

     void Relay(CConnman& connman) const;
diff --git a/src/llmq/quorums_dkgsessionhandler.cpp b/src/llmq/quorums_dkgsessionhandler.cpp
index 311a931435b6f..0a376a45270c2 100644
--- a/src/llmq/quorums_dkgsessionhandler.cpp
+++ b/src/llmq/quorums_dkgsessionhandler.cpp
@@ -376,7 +376,7 @@ bool ProcessPendingMessageBatch(CDKGSession& session, CDKGPendingMessages& pendi
     for (const auto& p : msgs) {
         if (!p.second) {
-            LogPrint("net", "%s -- failed to deserialize message, peer=%d", __func__, p.first);
+            LogPrint("net", "%s -- failed to deserialize message, peer=%d\n", __func__, p.first);
             {
                 LOCK(cs_main);
                 Misbehaving(p.first, 100);
diff --git a/src/serialize.h b/src/serialize.h
index d2104672d8999..4c82a2821f3d1 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -437,7 +437,7 @@ class CFixedBitSet
             vec[p] = (vBytes[p / 8] & (1 << (p % 8))) != 0;
         if (vBytes.size() * 8 != size) {
             size_t rem = vBytes.size() * 8 - size;
-            uint8_t m = (uint8_t)(0xff << rem);
+            uint8_t m = ~(uint8_t)(0xff >> rem);
             if (vBytes[vBytes.size() - 1] & m) {
                 throw std::ios_base::failure("Out-of-range bits set");
             }
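
Note on the src/serialize.h hunk: CFixedBitSet stores bits LSB-first, so when the bitset size is not a multiple of 8, only the low `8 - rem` bits of the last byte are in use and the top `rem` bits are padding that must be zero. The old mask `0xff << rem` also covered some valid low bits and could reject well-formed data; the new mask `~(uint8_t)(0xff >> rem)` selects exactly the `rem` padding bits. The sketch below is illustrative only (it is not part of the patch) and simply prints both masks side by side.

```cpp
// Illustrative only: compare the old and new padding masks used when
// deserializing a CFixedBitSet of `size` bits (bits stored LSB-first).
#include <cstdint>
#include <cstdio>

int main()
{
    for (size_t size = 1; size <= 16; ++size) {
        size_t nBytes = (size + 7) / 8;
        if (nBytes * 8 == size) continue;          // last byte fully used, no padding
        size_t rem = nBytes * 8 - size;            // number of unused high bits

        uint8_t oldMask = (uint8_t)(0xff << rem);  // old: also covers valid low bits
        uint8_t newMask = ~(uint8_t)(0xff >> rem); // new: only the top `rem` bits

        // e.g. size=5 -> rem=3: oldMask=0xf8 (wrongly includes bits 3-4), newMask=0xe0
        printf("size=%2zu rem=%zu old=0x%02x new=0x%02x\n",
               size, rem, (unsigned)oldMask, (unsigned)newMask);
    }
    return 0;
}
```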
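Note on the instantx.* hunks: the hard-coded COutPointLock::SIGNATURES_REQUIRED / SIGNATURES_TOTAL constants are replaced by the new per-network consensus fields nInstantSendSigsRequired / nInstantSendSigsTotal, so mainnet, testnet and devnet keep the 6-of-10 InstantSend voting scheme while regtest can run 3-of-5 on the smaller llmq_5_60 quorum. A minimal standalone sketch of that pattern (simplified types, not the actual chainparams code):

```cpp
// Minimal sketch (not the real chainparams types): per-network InstantSend
// vote thresholds replacing the removed COutPointLock constants.
struct InstantSendParams {
    int nInstantSendSigsRequired; // valid "txlvote" messages needed per outpoint
    int nInstantSendSigsTotal;    // top-ranked masternodes asked to vote per outpoint
};

constexpr InstantSendParams mainnetLike{6, 10}; // main/test/devnet keep 6-of-10
constexpr InstantSendParams regtest{3, 5};      // regtest drops to 3-of-5

// Mirrors COutPointLock::IsReady() after this patch: an outpoint counts as
// locked once enough votes arrived and it was not marked as attacked.
bool IsOutPointLockReady(const InstantSendParams& p, int countVotes, bool fAttacked)
{
    return !fAttacked && countVotes >= p.nInstantSendSigsRequired;
}
```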