Commit 22cfdda

Allow re-signing of IS locks when performing retroactive signing (#3219)
* Implement re-signing of InstantSend inputs when TXs come in via blocks
* Use GetAdjustedTime instead of GetTimeMillis in CSigSharesManager. This allows use of mocktime in tests.
* Expose verifiedProRegTxHash in getpeerinfo and implement wait_for_mnauth
* Allow to wait for IS and CL to NOT happen
* Bump timeout for wait_for_instantlock
* Implement tests for retroactive signing of IS and CLs
* Add wait_for_tx function to DashTestFramework
* Add -whitelist=127.0.0.1 to node0
* Use node3 for isolated block generation
* Don't test for non-receival of TXs on node4/node5
1 parent a8b8891 commit 22cfdda

12 files changed: +283 -22 lines changed

qa/pull-tester/rpc-tests.py

Lines changed: 1 addition & 0 deletions

@@ -48,6 +48,7 @@
     'llmq-chainlocks.py', # NOTE: needs dash_hash to pass
     'llmq-simplepose.py', # NOTE: needs dash_hash to pass
     'llmq-is-cl-conflicts.py', # NOTE: needs dash_hash to pass
+    'llmq-is-retroactive.py', # NOTE: needs dash_hash to pass
     'llmq-dkgerrors.py', # NOTE: needs dash_hash to pass
     'dip4-coinbasemerkleroots.py', # NOTE: needs dash_hash to pass
     # vv Tests less than 60s vv
qa/rpc-tests/llmq-is-retroactive.py (new file)

Lines changed: 178 additions & 0 deletions

@@ -0,0 +1,178 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

from test_framework.mininode import *
from test_framework.test_framework import DashTestFramework
from test_framework.util import sync_blocks, set_node_times, \
    isolate_node, reconnect_isolated_node

'''
llmq-is-retroactive.py

Tests retroactive signing

We have 6 nodes where node 0 is the control node, nodes 1-5 are masternodes.
Mempool inconsistencies are simulated via disconnecting/reconnecting node 3
and by having a higher relay fee on nodes 4 and 5.
'''

class LLMQ_IS_RetroactiveSigning(DashTestFramework):
    def set_test_params(self):
        # -whitelist is needed to avoid the trickling logic on node0
        self.set_dash_test_params(6, 5, [["-whitelist=127.0.0.1"], [], [], [], ["-minrelaytxfee=0.001"], ["-minrelaytxfee=0.001"]], fast_dip3_enforcement=True)

    def run_test(self):
        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
            self.nodes[0].generate(10)
        sync_blocks(self.nodes, timeout=60*5)

        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
        self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0)
        self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0)
        self.wait_for_sporks_same()

        self.mine_quorum()
        self.mine_quorum()

        # Make sure that all nodes are chainlocked at the same height before starting actual tests
        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())

        self.log.info("trying normal IS lock")
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # 3 nodes should be enough to create an IS lock even if nodes 4 and 5 (which have no tx itself)
        # are the only "neighbours" in intra-quorum connections for one of them.
        self.wait_for_instantlock(txid, self.nodes[0])
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)

        self.log.info("testing normal signing with partially known TX")
        isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # push the tx directly via rpc
        self.nodes[3].sendrawtransaction(self.nodes[0].getrawtransaction(txid))
        # node 3 should vote on a tx now since it became aware of it via sendrawtransaction
        # and this should be enough to complete an IS lock
        self.wait_for_instantlock(txid, self.nodes[0])

        self.log.info("testing retroactive signing with unknown TX")
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
        txid = self.nodes[3].sendrawtransaction(rawtx)
        # Make node 3 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        set_node_times(self.nodes, self.mocktime)
        block = self.nodes[3].generatetoaddress(1, self.nodes[0].getnewaddress())[0]
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_chainlocked_block_all_nodes(block)
        self.nodes[0].setmocktime(self.mocktime)

        self.log.info("testing retroactive signing with partially known TX")
        isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        set_node_times(self.nodes, self.mocktime)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)

        self.log.info("testing retroactive signing with partially known TX and all nodes session timeout")
        self.test_all_nodes_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_all_nodes_session_timeout(True)

        self.log.info("testing retroactive signing with partially known TX and single node session timeout")
        self.test_single_node_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_single_node_session_timeout(True)

    def cycle_llmqs(self):
        self.mine_quorum()
        self.mine_quorum()
        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())

    def test_all_nodes_session_timeout(self, do_cycle_llmqs):
        set_node_times(self.nodes, self.mocktime)
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
        txid = self.nodes[0].sendrawtransaction(rawtx)
        txid = self.nodes[3].sendrawtransaction(rawtx)
        # Make sure nodes 1 and 2 received the TX before we continue
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        # Make sure signing is done on nodes 1 and 2 (it's async)
        time.sleep(5)
        # Make the signing session for the IS lock timeout on nodes 1-3
        self.bump_mocktime(61)
        set_node_times(self.nodes, self.mocktime)
        time.sleep(2) # make sure Cleanup() is called
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the signing session is already timed out on all nodes, so no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        if do_cycle_llmqs:
            self.cycle_llmqs()
            self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node 0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        self.nodes[0].setmocktime(self.mocktime)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)

    def test_single_node_session_timeout(self, do_cycle_llmqs):
        set_node_times(self.nodes, self.mocktime)
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
        txid = self.nodes[3].sendrawtransaction(rawtx)
        time.sleep(2) # make sure signing is done on node 2 (it's async)
        # Make the signing session for the IS lock timeout on node 3
        self.bump_mocktime(61)
        set_node_times(self.nodes, self.mocktime)
        time.sleep(2) # make sure Cleanup() is called
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_mnauth(self.nodes[3], 2)
        self.nodes[0].sendrawtransaction(rawtx)
        # Make sure nodes 1 and 2 received the TX
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        # Make sure signing is done on nodes 1 and 2 (it's async)
        time.sleep(5)
        # node 3 fully reconnected but the signing session is already timed out on it, so no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 1)
        if do_cycle_llmqs:
            self.cycle_llmqs()
            self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node 0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        self.nodes[0].setmocktime(self.mocktime)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)

if __name__ == '__main__':
    LLMQ_IS_RetroactiveSigning().main()

qa/rpc-tests/test_framework/test_framework.py

Lines changed: 22 additions & 2 deletions

@@ -555,13 +555,23 @@ def send_complex_tx(self, sender, receiver):
         self.sync_all()
         return self.wait_for_instantlock(txid, sender)

-    def wait_for_instantlock(self, txid, node):
+    def wait_for_tx(self, txid, node, expected=True, timeout=15):
+        def check_tx():
+            try:
+                return node.getrawtransaction(txid)
+            except:
+                return False
+        if wait_until(check_tx, timeout=timeout, sleep=0.5, do_assert=expected) and not expected:
+            raise AssertionError("waiting unexpectedly succeeded")
+
+    def wait_for_instantlock(self, txid, node, expected=True, timeout=15):
         def check_instantlock():
             try:
                 return node.getrawtransaction(txid, True)["instantlock"]
             except:
                 return False
-        return wait_until(check_instantlock, timeout=10, sleep=0.5)
+        if wait_until(check_instantlock, timeout=timeout, sleep=0.5, do_assert=expected) and not expected:
+            raise AssertionError("waiting unexpectedly succeeded")

     def wait_for_chainlocked_block(self, node, block_hash, expected=True, timeout=15):
         def check_chainlocked_block():

@@ -712,6 +722,16 @@ def mine_quorum(self, expected_contributions=5, expected_complaints=0, expected_

         return new_quorum

+    def wait_for_mnauth(self, node, count, timeout=10):
+        def test():
+            pi = node.getpeerinfo()
+            c = 0
+            for p in pi:
+                if "verified_proregtx_hash" in p and p["verified_proregtx_hash"] != "":
+                    c += 1
+            return c >= count
+        wait_until(test, timeout=timeout)
+
 # Test framework for doing p2p comparison testing, which sets up some bitcoind
 # binaries:
 #   1 binary: test binary
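The three helpers added above are what the new llmq-is-retroactive.py test leans on. A minimal usage sketch follows; the helper names and semantics come from this diff, while the node indices, amounts and timeouts are chosen purely for illustration.

# Inside a DashTestFramework subclass's run_test() (illustrative values only)
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)

# wait_for_tx: poll getrawtransaction on node 1 until it succeeds (default timeout 15s)
self.wait_for_tx(txid, self.nodes[1])

# wait_for_instantlock with expected=False: assert that NO IS lock appears within 5s;
# if a lock does appear, wait_until returns truthy and an AssertionError is raised
self.wait_for_instantlock(txid, self.nodes[0], False, 5)

# wait_for_mnauth: wait until node 3 has at least 2 peers whose getpeerinfo entry
# carries a non-empty "verified_proregtx_hash", i.e. MNAUTH has completed
self.wait_for_mnauth(self.nodes[3], 2)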

src/llmq/quorums_instantsend.cpp

Lines changed: 11 additions & 7 deletions

@@ -374,7 +374,7 @@ void CInstantSendManager::InterruptWorkerThread()
     workInterrupt();
 }

-bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Params& params)
+bool CInstantSendManager::ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params)
 {
     if (!IsNewInstantSendEnabled()) {
         return true;

@@ -444,7 +444,7 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par
             return false;
         }
     }
-    if (alreadyVotedCount == ids.size()) {
+    if (!allowReSigning && alreadyVotedCount == ids.size()) {
         LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: already voted on all inputs, bailing out\n", __func__,
                  tx.GetHash().ToString());
         return true;

@@ -457,9 +457,9 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par
         auto& in = tx.vin[i];
         auto& id = ids[i];
         inputRequestIds.emplace(id);
-        LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s\n", __func__,
-                 tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString());
-        if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash())) {
+        LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s. allowReSigning=%d\n", __func__,
+                 tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString(), allowReSigning);
+        if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash(), allowReSigning)) {
             LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: voted on input %s with id %s\n", __func__,
                      tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString());
         }

@@ -1015,6 +1015,10 @@ void CInstantSendManager::SyncTransaction(const CTransaction& tx, const CBlockIn
         return;
     }

+    // This is different on develop as allowReSigning is passed in from the caller. In 0.14.0.x, we have to figure this out
+    // here to mimic develop.
+    bool allowReSigning = !inMempool && !isDisconnect;
+
     uint256 islockHash;
     {
         LOCK(cs);

@@ -1037,7 +1041,7 @@ void CInstantSendManager::SyncTransaction(const CTransaction& tx, const CBlockIn

     bool chainlocked = pindex && chainLocksHandler->HasChainLock(pindex->nHeight, pindex->GetBlockHash());
     if (islockHash.IsNull() && !chainlocked) {
-        ProcessTx(tx, Params().GetConsensus());
+        ProcessTx(tx, allowReSigning, Params().GetConsensus());
     }

     LOCK(cs);

@@ -1421,7 +1425,7 @@ bool CInstantSendManager::ProcessPendingRetryLockTxs()
                          tx->GetHash().ToString());
            }

-            ProcessTx(*tx, Params().GetConsensus());
+            ProcessTx(*tx, false, Params().GetConsensus());
             retryCount++;
         }
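In short, SyncTransaction now derives the re-signing permission from how it learned about the transaction: only transactions seen through a newly connected block (not a plain mempool arrival, not a block disconnect) may re-trigger voting on inputs that were already voted on, and the retry path always passes false. A rough Python model of the two rules above, using hypothetical names rather than dashd code, just to spell the logic out:

# Model of the 0.14.0.x rules from the hunks above (illustrative, not dashd code)

def allow_re_signing(in_mempool, is_disconnect):
    # Re-signing is only allowed for TXs that came in via a newly connected block.
    return (not in_mempool) and (not is_disconnect)

def process_tx_bails_out(allow_re_signing_flag, already_voted_count, input_count):
    # ProcessTx only takes the "already voted on all inputs" early exit
    # when re-signing is NOT allowed.
    return (not allow_re_signing_flag) and already_voted_count == input_count

assert allow_re_signing(in_mempool=False, is_disconnect=False)        # TX arrived in a block
assert not allow_re_signing(in_mempool=True, is_disconnect=False)     # ordinary mempool arrival
assert not allow_re_signing(in_mempool=False, is_disconnect=True)     # block being disconnected
assert not process_tx_bails_out(True, already_voted_count=2, input_count=2)  # re-sign instead of bailing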

src/llmq/quorums_instantsend.h

Lines changed: 1 addition & 1 deletion

@@ -120,7 +120,7 @@ class CInstantSendManager : public CRecoveredSigsListener
     void InterruptWorkerThread();

 public:
-    bool ProcessTx(const CTransaction& tx, const Consensus::Params& params);
+    bool ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params);
     bool CheckCanLock(const CTransaction& tx, bool printDebug, const Consensus::Params& params);
     bool CheckCanLock(const COutPoint& outpoint, bool printDebug, const uint256& txHash, CAmount* retValue, const Consensus::Params& params);
     bool IsLocked(const uint256& txHash);

src/llmq/quorums_signing.cpp

Lines changed: 15 additions & 4 deletions

@@ -743,7 +743,7 @@ void CSigningManager::UnregisterRecoveredSigsListener(CRecoveredSigsListener* l)
     recoveredSigsListeners.erase(itRem, recoveredSigsListeners.end());
 }

-bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash)
+bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign)
 {
     auto& params = Params().GetConsensus().llmqs.at(llmqType);

@@ -754,24 +754,31 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint
     {
         LOCK(cs);

-        if (db.HasVotedOnId(llmqType, id)) {
+        bool hasVoted = db.HasVotedOnId(llmqType, id);
+        if (hasVoted) {
             uint256 prevMsgHash;
             db.GetVoteForId(llmqType, id, prevMsgHash);
             if (msgHash != prevMsgHash) {
                 LogPrintf("CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting on conflicting msgHash=%s\n", __func__,
                           id.ToString(), prevMsgHash.ToString(), msgHash.ToString());
+                return false;
+            } else if (allowReSign) {
+                LogPrint("llmq", "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Resigning!\n", __func__,
+                         id.ToString(), prevMsgHash.ToString());
             } else {
                 LogPrint("llmq", "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting again.\n", __func__,
                          id.ToString(), prevMsgHash.ToString());
+                return false;
             }
-            return false;
         }

         if (db.HasRecoveredSigForId(llmqType, id)) {
             // no need to sign it if we already have a recovered sig
             return true;
         }
-        db.WriteVoteForId(llmqType, id, msgHash);
+        if (!hasVoted) {
+            db.WriteVoteForId(llmqType, id, msgHash);
+        }
     }

     int tipHeight;

@@ -796,6 +803,10 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint
         return false;
     }

+    if (allowReSign) {
+        // make us re-announce all known shares (other nodes might have run into a timeout)
+        quorumSigSharesManager->ForceReAnnouncement(quorum, llmqType, id, msgHash);
+    }
     quorumSigSharesManager->AsyncSign(quorum, id, msgHash);

     return true;
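The vote handling in AsyncSignIfMember now distinguishes three cases instead of two. A small decision-table sketch in Python, with hypothetical names that only mirror the branches above:

# Sketch of the vote decision in CSigningManager::AsyncSignIfMember (illustrative only)

def vote_decision(has_voted, prev_msg_hash, msg_hash, allow_re_sign):
    if has_voted:
        if msg_hash != prev_msg_hash:
            return "abort"       # conflicting msgHash: never vote twice on one id
        if allow_re_sign:
            return "re-sign"     # same msgHash: skip WriteVoteForId, re-announce shares, sign again
        return "abort"           # already voted and re-signing not requested
    return "first-vote"          # record the vote via WriteVoteForId, then sign

assert vote_decision(False, None, "h1", False) == "first-vote"
assert vote_decision(True, "h1", "h2", True) == "abort"      # conflicts always abort
assert vote_decision(True, "h1", "h1", True) == "re-sign"
assert vote_decision(True, "h1", "h1", False) == "abort"

On the re-sign path the vote is deliberately not written again (it is already in the DB) and ForceReAnnouncement makes the node re-broadcast its known sig shares, so peers whose signing sessions timed out can still assemble the recovered signature.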

src/llmq/quorums_signing.h

Lines changed: 1 addition & 1 deletion

@@ -167,7 +167,7 @@ class CSigningManager
     void RegisterRecoveredSigsListener(CRecoveredSigsListener* l);
     void UnregisterRecoveredSigsListener(CRecoveredSigsListener* l);

-    bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash);
+    bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign = false);
     bool HasRecoveredSig(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash);
     bool HasRecoveredSigForId(Consensus::LLMQType llmqType, const uint256& id);
     bool HasRecoveredSigForSession(const uint256& signHash);
