diff --git a/doc/Doxyfile b/doc/Doxyfile index f091a69adfadb..a3e0b347e6869 100644 --- a/doc/Doxyfile +++ b/doc/Doxyfile @@ -41,7 +41,7 @@ PROJECT_NAME = "BiblePay Core" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.14.0.4 +PROJECT_NUMBER = 0.14.0.5 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/doc/release-notes.md b/doc/release-notes.md index 309f1c737f5d7..caa991b0e8686 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,15 +1,17 @@ -Biblepay Core version 0.14.0.0 +Biblepay Core version 1.4.8.5 ========================== Release is now available from: - + -This is a new minor version release, bringing various bugfixes. +This is a new minor version release, bringing various bugfixes and improvements. +It is highly recommended to upgrade to this release as it contains a critical +fix for a possible DoS vector. Please report bugs using the issue tracker at github: - + Upgrading and downgrading @@ -20,79 +22,65 @@ How to Upgrade If you are running an older version, shut it down. Wait until it has completely shut down (which might take a few minutes for older versions), then run the -installer (on Windows) or just copy over /Applications/biblepay-Qt (on Mac) or -biblepayd/biblepay-qt (on Linux). If you upgrade after DIP0003 activation you will -have to reindex (start with -reindex-chainstate or -reindex) to make sure -your wallet has all the new data synced (only if you were using version < 0.13). - -Note that there is no protocol bump in this version and thus active masternodes -updating from v0.13.0.0 or v0.13.1.0 do not require any additional actions (no need to issue -`masternode start` command). 
-======= -installer (on Windows) or just copy over /Applications/Dash-Qt (on Mac) or +installer (on Windows) or just copy over /Applications/Biblepay-Qt (on Mac) or dashd/dash-qt (on Linux). If you upgrade after DIP0003 activation and you were using version < 0.13 you will have to reindex (start with -reindex-chainstate or -reindex) to make sure your wallet has all the new data synced. Upgrading from version 0.13 should not require any additional actions. +When upgrading from a version prior to 0.14.0.3, the +first startup of Biblepay Core will run a migration process which can take a few minutes +to finish. After the migration, a downgrade to an older version is only possible with +a reindex (or reindex-chainstate). + Downgrade warning ----------------- -### Downgrade to a version < 0.13.0.0 - -Downgrading to a version smaller than 0.13 is not supported anymore as DIP2/DIP3 has -activated on mainnet and testnet. +### Downgrade to a version < 0.14.0.3 -### Downgrade to versions 0.13.0.0 - 0.13.3.0 - -Downgrading to 0.13 releases is fully supported until DIP0008 activation but is not -recommended unless you have some serious issues with version 0.14. +Downgrading to a version smaller than 0.14.0.3 is not supported anymore due to changes +in the "evodb" database format. If you need to use an older version, you have to perform +a reindex or re-sync the whole chain. Notable changes =============== -Fixed governance votes pruning for invalid masternodes ------------------------------------------------------- -A community member reported a possible attack that involves DoSing masternodes to force the network -to prune all governance votes from this masternodes. This could be used to manipulate vote outcomes. +Fix for a DoS vector +-------------------- -See detailed [set of changes](https://github.com/biblepaypay/biblepay/compare/v0.13.1.0...biblepaypay:v0.13.2.0). 
-======= -This vulnerability is currently not possible to execute as LLMQ DKGs and PoSe have not activated yet on -mainnet. This version includes a fix that requires to have at least 51% masternodes to upgrade to -0.14.0.1, after which superblock trigger voting will automatically fix the discrepancies between -old and new nodes. This also means that we will postpone activation of LLMQ DKGs and thus PoSe until -at least 51% of masternodes have upgraded to 0.14.0.1. - -Fixed a rare memory/db leak in LLMQ based InstantSend ------------------------------------------------------ -We fixed a rare memory/db leak in LLMQ based InstantSend leak which would only occur when reorganizations -would happen. - -- [`548a48918`](https://github.com/biblepaypay/biblepay/commit/548a48918) Move IS block filtering into ConnectBlock (#2766) -- [`6374dce99`](https://github.com/biblepaypay/biblepay/commit/6374dce99) Fix error message for invalid voting addresses (#2747) -- [`25222b378`](https://github.com/biblepaypay/biblepay/commit/25222b378) Make -masternodeblsprivkey mandatory when -masternode is given (#2745) -- [`0364e033a`](https://github.com/biblepaypay/biblepay/commit/0364e033a) Implement 2-stage commit for CEvoDB to avoid inconsistencies after crashes (#2744) -- [`a11e2f9eb`](https://github.com/biblepaypay/biblepay/commit/a11e2f9eb) Add collateraladdress into masternode/protx list rpc output (#2740) -- [`43612a272`](https://github.com/biblepaypay/biblepay/commit/43612a272) Only include selected TX types into CMerkleBlock (#2737) -- [`f868fbc78`](https://github.com/biblepaypay/biblepay/commit/f868fbc78) Stop g_connman first before deleting it (#2734) -- [`9e233f391`](https://github.com/biblepaypay/biblepay/commit/9e233f391) Fix incorrect usage of begin() when genesis block is requested in "protx diff" (#2699) -- [`e75f971b9`](https://github.com/biblepaypay/biblepay/commit/e75f971b9) Do not process blocks in CDeterministicMNManager before dip3 activation (#2698) -- 
[`1cc47ebcd`](https://github.com/biblepaypay/biblepay/commit/1cc47ebcd) Backport #14701: build: Add CLIENT_VERSION_BUILD to CFBundleGetInfoString (#2687) -======= -0.14.0.1 Change log -=================== +This release fixes a serious DoS vector which allows to cause memory exhaustion until the point of +out-of-memory related crashes. We highly recommend upgrading all nodes. Thanks to Bitcoin ABC +developers for finding and reporting this issue to us. -See detailed [set of changes](https://github.com/dashpay/dash/compare/v0.14.0.0...dashpay:v0.14.0.1). +Better handling of non-locked transactions in mined blocks +---------------------------------------------------------- -- [`2516a6e19`](https://github.com/biblepaypay/biblepay/commit/2516a6e19) Bump version to 0.13.2 -- [`9dd16cdbe`](https://github.com/biblepaypay/biblepay/commit/9dd16cdbe) Bump minChainWork and AssumeValid to block #1033120 (#2750) -- [`18f087b27`](https://github.com/biblepaypay/biblepay/commit/18f087b27) Fix some typos in doc/guide-startmany.md (#2711) -- [`709ab6d3e`](https://github.com/biblepaypay/biblepay/commit/709ab6d3e) Minimal fix for litemode vs bad-protx-key-not-same issue (#2694) -======= -- [`a2baa93ec`](https://github.com/dashpay/dash/commit/a2baa93ec) Only require valid collaterals for votes and triggers (#2947) (#2957) -- [`b293e6dde`](https://github.com/dashpay/dash/commit/b293e6dde) Fix off-by-one error in InstantSend mining info removal when disconnecting blocks (#2951) -- [`276b6e3a8`](https://github.com/dashpay/dash/commit/276b6e3a8) bump version to 0.14.0.1 and prepare release notes (#2952) +We observed multiple cases of ChainLocks failing on mainnet. We tracked this down to a situation where +PrivateSend mixing transactions were first rejected by parts of the network (0.14.0.4 nodes) while other parts +(<=0.14.0.3) accepted the transaction into the mempool. 
This caused InstantSend locking to fail for these +transactions, while non-upgraded miners still included the transactions into blocks after 10 minutes. +This caused blocks to not get ChainLocked for at least 10 minutes. This release improves an already existing +fallback mechanism (retroactive InstantSend locking) to also work for transactions which are already partially +known in the network. This should cause ChainLocks to succeed in such situations. + +0.14.0.5 Change log +=================== + +See detailed [set of changes](https://github.com/dashpay/dash/compare/v0.14.0.4...dashpay:v0.14.0.5). + +- [`20d4a27778`](https://github.com/dashpay/dash/commit/20d4a27778) Make sure mempool txes are properly processed by CChainLocksHandler despite node restarts (#3230) +- [`dc07a0c5e1`](https://github.com/dashpay/dash/commit/dc07a0c5e1) [v0.14.0.x] Bump version and prepare release notes (#3228) +- [`401da32090`](https://github.com/dashpay/dash/commit/401da32090) More fixes in llmq-is-retroactive tests +- [`33721eaa11`](https://github.com/dashpay/dash/commit/33721eaa11) Make llmq-is-retroactive test compatible with 0.14.0.x +- [`85bd162a3e`](https://github.com/dashpay/dash/commit/85bd162a3e) Make wait_for_xxx methods compatible with 0.14.0.x +- [`22cfddaf12`](https://github.com/dashpay/dash/commit/22cfddaf12) Allow re-signing of IS locks when performing retroactive signing (#3219) +- [`a8b8891a1d`](https://github.com/dashpay/dash/commit/a8b8891a1d) Add wait_for_xxx methods as found in develop +- [`8dae12cc60`](https://github.com/dashpay/dash/commit/8dae12cc60) More/better logging for InstantSend +- [`fdd19cf667`](https://github.com/dashpay/dash/commit/fdd19cf667) Tests: Fix the way nodes are connected to each other in setup_network/start_masternodes (#3221) +- [`41f0e9d028`](https://github.com/dashpay/dash/commit/41f0e9d028) More fixes related to extra_args +- [`5213118601`](https://github.com/dashpay/dash/commit/5213118601) Tests: Allow specifying different cmd-line 
params for each masternode (#3222) +- [`2fef21fd80`](https://github.com/dashpay/dash/commit/2fef21fd80) Don't join thread in CQuorum::~CQuorum when called from within the thread (#3223) +- [`e69c6c3207`](https://github.com/dashpay/dash/commit/e69c6c3207) Merge #12392: Fix ignoring tx data requests when fPauseSend is set on a peer (#3225) Credits ======= @@ -100,7 +88,6 @@ Credits Thanks to everyone who directly contributed to this release: - Alexander Block (codablock) -- demodun6 - UdjinM6 As well as everyone that submitted issues and reviewed pull requests. @@ -128,6 +115,10 @@ Dash Core tree 0.12.1.x was a fork of Bitcoin Core tree 0.12. These release are considered obsolete. Old release notes can be found here: +- [v0.14.0.4](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.4.md) released November/22/2019 +- [v0.14.0.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.3.md) released August/15/2019 +- [v0.14.0.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.2.md) released July/4/2019 +- [v0.14.0.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.1.md) released May/31/2019 - [v0.14.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.md) released May/22/2019 - [v0.13.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.3.md) released Apr/04/2019 - [v0.13.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.2.md) released Mar/15/2019 diff --git a/doc/release-notes/dash/release-notes-0.14.0.4.md b/doc/release-notes/dash/release-notes-0.14.0.4.md new file mode 100644 index 0000000000000..2dfd8b0ece92f --- /dev/null +++ b/doc/release-notes/dash/release-notes-0.14.0.4.md @@ -0,0 +1,177 @@ +Dash Core version 0.14.0.4 +========================== + +Release is now available from: + + + +This is a new 
minor version release, bringing various bugfixes and improvements. + +Please report bugs using the issue tracker at github: + + + + +Upgrading and downgrading +========================= + +How to Upgrade +-------------- + +If you are running an older version, shut it down. Wait until it has completely +shut down (which might take a few minutes for older versions), then run the +installer (on Windows) or just copy over /Applications/Dash-Qt (on Mac) or +dashd/dash-qt (on Linux). If you upgrade after DIP0003 activation and you were +using version < 0.13 you will have to reindex (start with -reindex-chainstate +or -reindex) to make sure your wallet has all the new data synced. Upgrading from +version 0.13 should not require any additional actions. + +When upgrading from a version prior to 0.14.0.3, the +first startup of Dash Core will run a migration process which can take a few minutes +to finish. After the migration, a downgrade to an older version is only possible with +a reindex (or reindex-chainstate). + +Downgrade warning +----------------- + +### Downgrade to a version < 0.14.0.3 + +Downgrading to a version smaller than 0.14.0.3 is not supported anymore due to changes +in the "evodb" database format. If you need to use an older version, you have to perform +a reindex or re-sync the whole chain. + +Notable changes +=============== + +Fix respends of freshly received InstantSend transactions +--------------------------------------------------------- + +A bug in Dash Core caused respends to not work before a received InstantSend transaction was confirmed in at least +one block. This is fixed in this release, so that InstantSend locked mempool transactions can be +respent immediately in Dash Core (other wallets were not affected). 
+ +Deprecation of SPORK_16_INSTANTSEND_AUTOLOCKS +--------------------------------------------- + +With the activation of SPORK_20_INSTANTSEND_LLMQ_BASED a few months ago, all transactions started to be locked via +InstantSend, which already partly deprecated SPORK_16_INSTANTSEND_AUTOLOCKS. This release removes the last use +of SPORK_16_INSTANTSEND_AUTOLOCKS, which caused InstantSend to stop working when the mempool got too large. + +Improve orphan transaction limit handling +----------------------------------------- + +Instead of limiting orphan transactions by number of transactions, we limit orphans by total size in bytes +now. This allows to have thousands of orphan transactions before hitting the limit. + +Discrepancies in orphan sets between nodes and handling of those was one of the major limiting factors in +the stress tests performed by an unknown entity on mainnet. + +Improve re-requesting for already known transactions +---------------------------------------------------- + +Previously, Dash would re-request old transactions even though they were already known locally. This +happened when the outputs were respent very shortly after confirmation of the transaction. This led to +wrongly handling these transactions as orphans, filling up the orphan set and hitting limits very fast. +This release fixes this for nodes which have txindex enabled, which is the case for all masternodes. Normal +nodes (without txindex) can ignore the issue as they are not involved in active InstantSend locking. + +Another issue fixed in this release is the re-requesting of transactions after an InstantSend lock invalidated +a conflicting transaction. + +Multiple improvements to PrivateSend +------------------------------------ + +Multiple improvements to PrivateSend are introduced in this release, leading to faster mixing and more +reasonable selection of UTXOs when sending PrivateSend funds. 
+ +Fix for CVE-2017-18350 +---------------------- + +Bitcoin silently implemented a hidden fix for [CVE-2017-18350](https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2019-November/017453.html). +in Bitcoin v0.15.1. This release of Dash Core includes a backport of this fix. + + +0.14.0.4 Change log +=================== + +See detailed [set of changes](https://github.com/dashpay/dash/compare/v0.14.0.3...dashpay:v0.14.0.4). + +- [`5f98ed7a5`](https://github.com/dashpay/dash/commit/5f98ed7a5) [v0.14.0.x] Bump version to 0.14.0.4 and draft release notes (#3203) +- [`c0dda38fe`](https://github.com/dashpay/dash/commit/c0dda38fe) Circumvent BIP69 sorting in fundrawtransaction.py test (#3100) +- [`64ae6365f`](https://github.com/dashpay/dash/commit/64ae6365f) Fix compile issues +- [`36473015b`](https://github.com/dashpay/dash/commit/36473015b) Merge #11397: net: Improve and document SOCKS code +- [`66e298728`](https://github.com/dashpay/dash/commit/66e298728) Slightly optimize ApproximateBestSubset and its usage for PS txes (#3184) +- [`16b6b6f7c`](https://github.com/dashpay/dash/commit/16b6b6f7c) Update activemn if protx info changed (#3176) +- [`ce6687130`](https://github.com/dashpay/dash/commit/ce6687130) Actually update spent index on DisconnectBlock (#3167) +- [`9b49bfda8`](https://github.com/dashpay/dash/commit/9b49bfda8) Only track last seen time instead of first and last seen time (#3165) +- [`ad720eef1`](https://github.com/dashpay/dash/commit/ad720eef1) Merge #17118: build: depends macOS: point --sysroot to SDK (#3161) +- [`909d6a4ba`](https://github.com/dashpay/dash/commit/909d6a4ba) Simulate BlockConnected/BlockDisconnected for PS caches +- [`db7f471c7`](https://github.com/dashpay/dash/commit/db7f471c7) Few fixes related to SelectCoinsGroupedByAddresses (#3144) +- [`1acd4742c`](https://github.com/dashpay/dash/commit/1acd4742c) Various fixes for mixing queues (#3138) +- [`0031d6b04`](https://github.com/dashpay/dash/commit/0031d6b04) Also consider txindex for 
transactions in AlreadyHave() (#3126) +- [`c4be5ac4d`](https://github.com/dashpay/dash/commit/c4be5ac4d) Ignore recent rejects filter for locked txes (#3124) +- [`f2d401aa8`](https://github.com/dashpay/dash/commit/f2d401aa8) Make orphan TX map limiting dependent on total TX size instead of TX count (#3121) +- [`87ff566a0`](https://github.com/dashpay/dash/commit/87ff566a0) Update/modernize macOS plist (#3074) +- [`2141d5f9d`](https://github.com/dashpay/dash/commit/2141d5f9d) Fix bip69 vs change position issue (#3063) +- [`75fddde67`](https://github.com/dashpay/dash/commit/75fddde67) Partially revert 3061 (#3150) +- [`c74f2cd8b`](https://github.com/dashpay/dash/commit/c74f2cd8b) Fix SelectCoinsMinConf to allow instant respends (#3061) +- [`2e7ec2369`](https://github.com/dashpay/dash/commit/2e7ec2369) [0.14.0.x] Remove check for mempool size in CInstantSendManager::CheckCanLock (#3119) + +Credits +======= + +Thanks to everyone who directly contributed to this release: + +- Alexander Block (codablock) +- Nathan Marley (nmarley) +- PastaPastaPasta +- UdjinM6 + +As well as everyone that submitted issues and reviewed pull requests. + +Older releases +============== + +Dash was previously known as Darkcoin. + +Darkcoin tree 0.8.x was a fork of Litecoin tree 0.8, original name was XCoin +which was first released on Jan/18/2014. + +Darkcoin tree 0.9.x was the open source implementation of masternodes based on +the 0.8.x tree and was first released on Mar/13/2014. + +Darkcoin tree 0.10.x used to be the closed source implementation of Darksend +which was released open source on Sep/25/2014. + +Dash Core tree 0.11.x was a fork of Bitcoin Core tree 0.9, +Darkcoin was rebranded to Dash. + +Dash Core tree 0.12.0.x was a fork of Bitcoin Core tree 0.10. + +Dash Core tree 0.12.1.x was a fork of Bitcoin Core tree 0.12. + +These release are considered obsolete. 
Old release notes can be found here: + +- [v0.14.0.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.3.md) released August/15/2019 +- [v0.14.0.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.2.md) released July/4/2019 +- [v0.14.0.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.1.md) released May/31/2019 +- [v0.14.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.md) released May/22/2019 +- [v0.13.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.3.md) released Apr/04/2019 +- [v0.13.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.2.md) released Mar/15/2019 +- [v0.13.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.1.md) released Feb/9/2019 +- [v0.13.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.0.md) released Jan/14/2019 +- [v0.12.3.4](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.4.md) released Dec/14/2018 +- [v0.12.3.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.3.md) released Sep/19/2018 +- [v0.12.3.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.2.md) released Jul/09/2018 +- [v0.12.3.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.1.md) released Jul/03/2018 +- [v0.12.2.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.2.3.md) released Jan/12/2018 +- [v0.12.2.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.2.2.md) released Dec/17/2017 +- [v0.12.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.2.md) released Nov/08/2017 +- 
[v0.12.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.1.md) released Feb/06/2017 +- [v0.12.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.0.md) released Aug/15/2015 +- [v0.11.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.11.2.md) released Mar/04/2015 +- [v0.11.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.11.1.md) released Feb/10/2015 +- [v0.11.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.11.0.md) released Jan/15/2015 +- [v0.10.x](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.10.0.md) released Sep/25/2014 +- [v0.9.x](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.9.0.md) released Mar/13/2014 + diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py index 624ec3c75eb6c..b27d7e047acd8 100755 --- a/qa/pull-tester/rpc-tests.py +++ b/qa/pull-tester/rpc-tests.py @@ -2,25 +2,21 @@ # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. - """ -Run Regression Test Suite +rpc-tests.py - run regression test suite This module calls down into individual test cases via subprocess. It will -forward all unrecognized arguments onto the individual test scripts, other -than: +forward all unrecognized arguments onto the individual test scripts. - - `-extended`: run the "extended" test suite in addition to the basic one. - - `-win`: signal that this is running in a Windows environment, and we - should run the tests. - - `--coverage`: this generates a basic coverage report for the RPC - interface. +RPC tests are disabled on Windows by default. Use --force to run them anyway. 
For a description of arguments recognized by test scripts, see `qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`. """ +import argparse +import configparser import os import time import shutil @@ -29,95 +25,18 @@ import tempfile import re -sys.path.append("qa/pull-tester/") -from tests_config import * - -BOLD = ("","") -if os.name == 'posix': - # primitive formatting on supported - # terminal via ANSI escape sequences: - BOLD = ('\033[0m', '\033[1m') - -RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/' - -#If imported values are not defined then set to zero (or disabled) -if 'ENABLE_WALLET' not in vars(): - ENABLE_WALLET=0 -if 'ENABLE_BITCOIND' not in vars(): - ENABLE_BITCOIND=0 -if 'ENABLE_UTILS' not in vars(): - ENABLE_UTILS=0 -if 'ENABLE_ZMQ' not in vars(): - ENABLE_ZMQ=0 - -# python-zmq may not be installed. Handle this gracefully and with some helpful info -if ENABLE_ZMQ: - try: - import zmq - except ImportError: - print("WARNING: \"import zmq\" failed. Setting ENABLE_ZMQ=0. 
" \ - "To run zmq tests, see dependency info in /qa/README.md.") - ENABLE_ZMQ=0 - -ENABLE_COVERAGE=0 - -#Create a set to store arguments and create the passon string -opts = set() -passon_args = [] -PASSON_REGEX = re.compile("^--") -PARALLEL_REGEX = re.compile('^-parallel=') - -print_help = False -run_parallel = 4 - -for arg in sys.argv[1:]: - if arg == "--help" or arg == "-h" or arg == "-?": - print_help = True - break - if arg == '--coverage': - ENABLE_COVERAGE = 1 - elif PASSON_REGEX.match(arg): - passon_args.append(arg) - elif PARALLEL_REGEX.match(arg): - run_parallel = int(arg.split(sep='=', maxsplit=1)[1]) - else: - opts.add(arg) - -#Set env vars -if "BIBLEPAYD" not in os.environ: - os.environ["BIBLEPAYD"] = BUILDDIR + '/src/biblepayd' + EXEEXT - -if EXEEXT == ".exe" and "-win" not in opts: - # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9 - # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964 - print("Win tests currently disabled by default. Use -win option to enable") - sys.exit(0) - -if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1): - print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled") - sys.exit(0) - -# python3-zmq may not be installed. Handle this gracefully and with some helpful info -if ENABLE_ZMQ: - try: - import zmq - except ImportError: - print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " - "to run zmq tests, see dependency info in /qa/README.md.") - # ENABLE_ZMQ=0 - raise - -testScripts = [ - # longest test should go first, to favor running tests in parallel - 'dip3-deterministicmns.py', # NOTE: needs biblepay_hash to pass +BASE_SCRIPTS= [ + # Scripts that are run by the travis build process. 
+ # Longest test should go first, to favor running tests in parallel + 'dip3-deterministicmns.py', # NOTE: needs dash_hash to pass 'wallet-hd.py', 'walletbackup.py', # vv Tests less than 5m vv - 'p2p-fullblocktest.py', # NOTE: needs biblepay_hash to pass + 'p2p-fullblocktest.py', # NOTE: needs dash_hash to pass 'fundrawtransaction.py', 'fundrawtransaction-hd.py', 'p2p-autoinstantsend.py', - 'autoix-mempool.py', + 'autois-mempool.py', # vv Tests less than 2m vv 'p2p-instantsend.py', 'wallet.py', @@ -125,8 +44,15 @@ 'wallet-dump.py', 'listtransactions.py', 'multikeysporks.py', + 'llmq-signing.py', # NOTE: needs dash_hash to pass + 'llmq-chainlocks.py', # NOTE: needs dash_hash to pass + 'llmq-simplepose.py', # NOTE: needs dash_hash to pass + 'llmq-is-cl-conflicts.py', # NOTE: needs dash_hash to pass + 'llmq-is-retroactive.py', # NOTE: needs dash_hash to pass + 'llmq-dkgerrors.py', # NOTE: needs dash_hash to pass + 'dip4-coinbasemerkleroots.py', # NOTE: needs dash_hash to pass # vv Tests less than 60s vv - 'sendheaders.py', # NOTE: needs biblepay_hash to pass + 'sendheaders.py', # NOTE: needs dash_hash to pass 'zapwallettxes.py', 'importmulti.py', 'mempool_limit.py', @@ -159,8 +85,8 @@ 'keypool-hd.py', 'p2p-mempool.py', 'prioritise_transaction.py', - 'invalidblockrequest.py', # NOTE: needs biblepay_hash to pass - 'invalidtxrequest.py', # NOTE: needs biblepay_hash to pass + 'invalidblockrequest.py', # NOTE: needs dash_hash to pass + 'invalidtxrequest.py', # NOTE: needs dash_hash to pass 'p2p-versionbits-warning.py', 'preciousblock.py', 'importprunedfunds.py', @@ -172,11 +98,17 @@ 'p2p-leaktests.py', 'p2p-compactblocks.py', 'sporks.py', + 'p2p-fingerprint.py', ] -if ENABLE_ZMQ: - testScripts.append('zmq_test.py') -testScriptsExt = [ +ZMQ_SCRIPTS = [ + # ZMQ test can only be run if Dash Core was built with zmq-enabled. + # call rpc_tests.py with -nozmq to explicitly exclude these tests. 
+ "zmq_test.py"] + +EXTENDED_SCRIPTS = [ + # These tests are not run by the travis build process. + # Longest test should go first, to favor running tests in parallel # 'pruning.py', # Prune mode is incompatible with -txindex. # vv Tests less than 20m vv 'smartfees.py', @@ -192,8 +124,8 @@ 'rpcbind_test.py', # vv Tests less than 30s vv 'bip65-cltv.py', - 'bip65-cltv-p2p.py', # NOTE: needs biblepay_hash to pass - 'bipdersig-p2p.py', # NOTE: needs biblepay_hash to pass + 'bip65-cltv-p2p.py', # NOTE: needs dash_hash to pass + 'bipdersig-p2p.py', # NOTE: needs dash_hash to pass 'bipdersig.py', 'getblocktemplate_proposals.py', 'txn_doublespend.py', @@ -201,47 +133,134 @@ 'forknotify.py', 'invalidateblock.py', 'maxblocksinflight.py', - 'p2p-acceptblock.py', # NOTE: needs biblepay_hash to pass + 'p2p-acceptblock.py', # NOTE: needs dash_hash to pass ] +ALL_SCRIPTS = BASE_SCRIPTS + ZMQ_SCRIPTS + EXTENDED_SCRIPTS + +def main(): + # Parse arguments and pass through unrecognised args + parser = argparse.ArgumentParser(add_help=False, + usage='%(prog)s [rpc-test.py options] [script options] [scripts]', + description=__doc__, + epilog=''' + Help text and arguments for individual test script:''', + formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface') + parser.add_argument('--exclude', '-x', help='specify a comma-seperated-list of scripts to exclude. Do not include the .py extension in the name.') + parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests') + parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).') + parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit') + parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. 
Default=4.') + parser.add_argument('--nozmq', action='store_true', help='do not run the zmq tests') + args, unknown_args = parser.parse_known_args() + + # Create a set to store arguments and create the passon string + tests = set(arg for arg in unknown_args if arg[:2] != "--") + passon_args = [arg for arg in unknown_args if arg[:2] == "--"] + + # Read config generated by configure. + config = configparser.ConfigParser() + config.read_file(open(os.path.dirname(__file__) + "/tests_config.ini")) + + enable_wallet = config["components"].getboolean("ENABLE_WALLET") + enable_utils = config["components"].getboolean("ENABLE_UTILS") + enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND") + enable_zmq = config["components"].getboolean("ENABLE_ZMQ") and not args.nozmq + + if config["environment"]["EXEEXT"] == ".exe" and not args.force: + # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9 + # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964 + print("Tests currently disabled on Windows by default. Use --force option to enable") + sys.exit(0) -def runtests(): - test_list = [] - if '-extended' in opts: - test_list = testScripts + testScriptsExt - elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts): - test_list = testScripts + if not (enable_wallet and enable_utils and enable_bitcoind): + print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled") + print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make") + sys.exit(0) + + # python3-zmq may not be installed. Handle this gracefully and with some helpful info + if enable_zmq: + try: + import zmq + except ImportError: + print("ERROR: \"import zmq\" failed. Use -nozmq to run without the ZMQ tests." + "To run zmq tests, see dependency info in /qa/README.md.") + raise + + # Build list of tests + if tests: + # Individual tests have been specified. Run specified tests that exist + # in the ALL_SCRIPTS list. 
Accept the name with or without .py extension. + test_list = [t for t in ALL_SCRIPTS if + (t in tests or re.sub(".py$", "", t) in tests)] else: - for t in testScripts + testScriptsExt: - if t in opts or re.sub(".py$", "", t) in opts: - test_list.append(t) + # No individual tests have been specified. Run base tests, and + # optionally ZMQ tests and extended tests. + test_list = BASE_SCRIPTS + if enable_zmq: + test_list += ZMQ_SCRIPTS + if args.extended: + test_list += EXTENDED_SCRIPTS + # TODO: BASE_SCRIPTS and EXTENDED_SCRIPTS are sorted by runtime + # (for parallel running efficiency). This combined list will is no + # longer sorted. + + # Remove the test cases that the user has explicitly asked to exclude. + if args.exclude: + for exclude_test in args.exclude.split(','): + if exclude_test + ".py" in test_list: + test_list.remove(exclude_test + ".py") + + if not test_list: + print("No valid test scripts specified. Check that your test is in one " + "of the test lists in rpc-tests.py, or run rpc-tests.py with no arguments to run all tests") + sys.exit(0) - if print_help: - # Only print help of the first script and exit - subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h']) + if args.help: + # Print help for rpc-tests.py, then print help of the first script and exit. 
+ parser.print_help() + subprocess.check_call((config["environment"]["SRCDIR"] + '/qa/rpc-tests/' + test_list[0]).split() + ['-h']) sys.exit(0) - coverage = None + run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], args.jobs, args.coverage, passon_args) - if ENABLE_COVERAGE: +def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=False, args=[]): + BOLD = ("","") + if os.name == 'posix': + # primitive formatting on supported + # terminal via ANSI escape sequences: + BOLD = ('\033[0m', '\033[1m') + + #Set env vars + if "BITCOIND" not in os.environ: + os.environ["BITCOIND"] = build_dir + '/src/dashd' + exeext + + tests_dir = src_dir + '/qa/rpc-tests/' + + flags = ["--srcdir={}/src".format(build_dir)] + args + flags.append("--cachedir=%s/qa/cache" % build_dir) + + if enable_coverage: coverage = RPCCoverage() - print("Initializing coverage directory at %s\n" % coverage.dir) - flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args - flags.append("--cachedir=%s/qa/cache" % BUILDDIR) - if coverage: flags.append(coverage.flag) + print("Initializing coverage directory at %s\n" % coverage.dir) + else: + coverage = None - if len(test_list) > 1 and run_parallel > 1: + if len(test_list) > 1 and jobs > 1: # Populate cache - subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags) + subprocess.check_output([tests_dir + 'create_cache.py'] + flags) #Run Tests - max_len_name = len(max(test_list, key=len)) + all_passed = True time_sum = 0 time0 = time.time() - job_queue = RPCTestHandler(run_parallel, test_list, flags) + + job_queue = RPCTestHandler(jobs, tests_dir, test_list, flags) + + max_len_name = len(max(test_list, key=len)) results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0] - all_passed = True for _ in range(len(test_list)): (name, stdout, stderr, passed, duration) = job_queue.get_next() all_passed = all_passed and passed @@ 
-250,8 +269,10 @@ def runtests(): print('\n' + BOLD[1] + name + BOLD[0] + ":") print('' if passed else stdout + '\n', end='') print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='') - results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration) print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration)) + + results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration) + results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0] print(results) print("\nRuntime: %s s" % (int(time.time() - time0))) @@ -264,15 +285,15 @@ def runtests(): sys.exit(not all_passed) - class RPCTestHandler: """ Trigger the testscrips passed in via the list. """ - def __init__(self, num_tests_parallel, test_list=None, flags=None): + def __init__(self, num_tests_parallel, tests_dir, test_list=None, flags=None): assert(num_tests_parallel >= 1) self.num_jobs = num_tests_parallel + self.tests_dir = tests_dir self.test_list = test_list self.flags = flags self.num_running = 0 @@ -292,7 +313,7 @@ def get_next(self): log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) self.jobs.append((t, time.time(), - subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed, + subprocess.Popen((self.tests_dir + t).split() + self.flags + port_seed, universal_newlines=True, stdout=log_stdout, stderr=log_stderr), @@ -357,10 +378,10 @@ def _get_uncovered_rpc_commands(self): """ # This is shared from `qa/rpc-tests/test-framework/coverage.py` - REFERENCE_FILENAME = 'rpc_interface.txt' - COVERAGE_FILE_PREFIX = 'coverage.' + reference_filename = 'rpc_interface.txt' + coverage_file_prefix = 'coverage.' 
- coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME) + coverage_ref_filename = os.path.join(self.dir, reference_filename) coverage_filenames = set() all_cmds = set() covered_cmds = set() @@ -373,7 +394,7 @@ def _get_uncovered_rpc_commands(self): for root, dirs, files in os.walk(self.dir): for filename in files: - if filename.startswith(COVERAGE_FILE_PREFIX): + if filename.startswith(coverage_file_prefix): coverage_filenames.add(os.path.join(root, filename)) for filename in coverage_filenames: @@ -384,4 +405,4 @@ def _get_uncovered_rpc_commands(self): if __name__ == '__main__': - runtests() + main() diff --git a/qa/rpc-tests/autois-mempool.py b/qa/rpc-tests/autois-mempool.py index 7e3f04a0d5046..61614a7224fc4 100644 --- a/qa/rpc-tests/autois-mempool.py +++ b/qa/rpc-tests/autois-mempool.py @@ -23,7 +23,7 @@ class AutoISMempoolTest(DashTestFramework): def __init__(self): - super().__init__(8, 5, ["-maxmempool=%d" % MAX_MEMPOOL_SIZE, '-limitdescendantsize=10'], fast_dip3_enforcement=True) + super().__init__(8, 5, [["-maxmempool=%d" % MAX_MEMPOOL_SIZE, '-limitdescendantsize=10']] * 8, fast_dip3_enforcement=True) # set sender, receiver self.receiver_idx = 1 self.sender_idx = 2 diff --git a/qa/rpc-tests/dip4-coinbasemerkleroots.py b/qa/rpc-tests/dip4-coinbasemerkleroots.py index 66fcf01f56a5b..e069ecbbb483c 100644 --- a/qa/rpc-tests/dip4-coinbasemerkleroots.py +++ b/qa/rpc-tests/dip4-coinbasemerkleroots.py @@ -39,7 +39,7 @@ def getmnlistdiff(self, baseBlockHash, blockHash): class LLMQCoinbaseCommitmentsTest(DashTestFramework): def __init__(self): - super().__init__(6, 5, [], fast_dip3_enforcement=True) + super().__init__(6, 5, fast_dip3_enforcement=True) def run_test(self): self.test_node = TestNode() diff --git a/qa/rpc-tests/llmq-chainlocks.py b/qa/rpc-tests/llmq-chainlocks.py index 4963c07542609..e6681f58c26f1 100644 --- a/qa/rpc-tests/llmq-chainlocks.py +++ b/qa/rpc-tests/llmq-chainlocks.py @@ -17,7 +17,7 @@ class LLMQChainLocksTest(DashTestFramework): 
def __init__(self): - super().__init__(6, 5, [], fast_dip3_enforcement=True) + super().__init__(6, 5, fast_dip3_enforcement=True) def run_test(self): @@ -72,7 +72,7 @@ def run_test(self): good_tip = self.nodes[0].getbestblockhash() # Restart it so that it forgets all the chainlocks from the past stop_node(self.nodes[0], 0) - self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args) + self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0]) connect_nodes(self.nodes[0], 1) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Now try to reorg the chain diff --git a/qa/rpc-tests/llmq-dkgerrors.py b/qa/rpc-tests/llmq-dkgerrors.py index 3166f47a4c01e..71c8364645a34 100644 --- a/qa/rpc-tests/llmq-dkgerrors.py +++ b/qa/rpc-tests/llmq-dkgerrors.py @@ -15,7 +15,7 @@ class LLMQDKGErrors(DashTestFramework): def __init__(self): - super().__init__(6, 5, [], fast_dip3_enforcement=True) + super().__init__(6, 5, fast_dip3_enforcement=True) def run_test(self): diff --git a/qa/rpc-tests/llmq-is-cl-conflicts.py b/qa/rpc-tests/llmq-is-cl-conflicts.py index 4351728e856e3..a004c6c1ca04b 100644 --- a/qa/rpc-tests/llmq-is-cl-conflicts.py +++ b/qa/rpc-tests/llmq-is-cl-conflicts.py @@ -45,7 +45,7 @@ def on_getdata(self, conn, message): class LLMQ_IS_CL_Conflicts(DashTestFramework): def __init__(self): - super().__init__(6, 5, [], fast_dip3_enforcement=True) + super().__init__(6, 5, fast_dip3_enforcement=True) #disable_mocktime() def run_test(self): diff --git a/qa/rpc-tests/llmq-is-retroactive.py b/qa/rpc-tests/llmq-is-retroactive.py new file mode 100755 index 0000000000000..355ceaaa3ce69 --- /dev/null +++ b/qa/rpc-tests/llmq-is-retroactive.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2018 The Dash Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +from test_framework.mininode import * +from test_framework.test_framework import DashTestFramework +from test_framework.util import sync_blocks, set_node_times, \ + isolate_node, reconnect_isolated_node, set_mocktime, get_mocktime + +''' +llmq-is-retroactive.py + +Tests retroactive signing + +We have 6 nodes where node 0 is the control node, nodes 1-5 are masternodes. +Mempool inconsistencies are simulated via disconnecting/reconnecting node 3 +and by having a higher relay fee on nodes 4 and 5. +''' + +class LLMQ_IS_RetroactiveSigning(DashTestFramework): + def __init__(self): + # -whitelist is needed to avoid the trickling logic on node0 + super().__init__(6, 5, [["-whitelist=127.0.0.1"], [], [], [], ["-minrelaytxfee=0.001"], ["-minrelaytxfee=0.001"]], fast_dip3_enforcement=True) + + def run_test(self): + while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active": + self.nodes[0].generate(10) + sync_blocks(self.nodes, timeout=60*5) + + self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0) + self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0) + self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0) + self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0) + self.nodes[0].spork("SPORK_20_INSTANTSEND_LLMQ_BASED", 0) + self.wait_for_sporks_same() + + self.mine_quorum() + self.mine_quorum() + + # Make sure that all nodes are chainlocked at the same height before starting actual tests + self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) + + self.log.info("trying normal IS lock") + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + # 3 nodes should be enough to create an IS lock even if nodes 4 and 5 (which have no tx itself) + # are the only "neighbours" in intra-quorum connections for one of them. 
+ self.wait_for_instantlock(txid, self.nodes[0], do_assert=True) + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + + self.log.info("testing normal signing with partially known TX") + isolate_node(self.nodes[3]) + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + # Make sure nodes 1 and 2 received the TX before we continue, + # otherwise it might announce the TX to node 3 when reconnecting + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 5, do_assert=True) + # push the tx directly via rpc + self.nodes[3].sendrawtransaction(self.nodes[0].getrawtransaction(txid)) + # node 3 should vote on a tx now since it became aware of it via sendrawtransaction + # and this should be enough to complete an IS lock + self.wait_for_instantlock(txid, self.nodes[0], do_assert=True) + + self.log.info("testing retroactive signing with unknown TX") + isolate_node(self.nodes[3]) + rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1}) + rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex'] + rawtx = self.nodes[0].signrawtransaction(rawtx)['hex'] + txid = self.nodes[3].sendrawtransaction(rawtx) + # Make node 3 consider the TX as safe + set_mocktime(get_mocktime() + 10 * 60 + 1) + set_node_times(self.nodes, get_mocktime()) + block = self.nodes[3].generatetoaddress(1, self.nodes[0].getnewaddress())[0] + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_chainlocked_block_all_nodes(block) + self.nodes[0].setmocktime(get_mocktime()) + + self.log.info("testing retroactive signing with partially known TX") + isolate_node(self.nodes[3]) + txid = 
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + # Make sure nodes 1 and 2 received the TX before we continue, + # otherwise it might announce the TX to node 3 when reconnecting + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 5, do_assert=True) + # Make node0 consider the TX as safe + set_mocktime(get_mocktime() + 10 * 60 + 1) + set_node_times(self.nodes, get_mocktime()) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + + self.log.info("testing retroactive signing with partially known TX and all nodes session timeout") + self.test_all_nodes_session_timeout(False) + self.log.info("repeating test, but with cycled LLMQs") + self.test_all_nodes_session_timeout(True) + + self.log.info("testing retroactive signing with partially known TX and single node session timeout") + self.test_single_node_session_timeout(False) + self.log.info("repeating test, but with cycled LLMQs") + self.test_single_node_session_timeout(True) + + def cycle_llmqs(self): + self.mine_quorum() + self.mine_quorum() + self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) + + def test_all_nodes_session_timeout(self, do_cycle_llmqs): + set_node_times(self.nodes, get_mocktime()) + isolate_node(self.nodes[3]) + rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1}) + rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex'] + rawtx = self.nodes[0].signrawtransaction(rawtx)['hex'] + txid = self.nodes[0].sendrawtransaction(rawtx) + txid = self.nodes[3].sendrawtransaction(rawtx) + # Make sure nodes 1 and 2 received the TX before we continue + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + # Make sure signing is 
done on nodes 1 and 2 (it's async) + time.sleep(5) + # Make the signing session for the IS lock timeout on nodes 1-3 + set_mocktime(get_mocktime() + 61) + set_node_times(self.nodes, get_mocktime()) + time.sleep(2) # make sure Cleanup() is called + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + # node 3 fully reconnected but the signing session is already timed out on all nodes, so no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 5, do_assert=True) + if do_cycle_llmqs: + self.cycle_llmqs() + self.wait_for_instantlock(txid, self.nodes[0], False, 5, do_assert=True) + # Make node 0 consider the TX as safe + set_mocktime(get_mocktime() + 10 * 60 + 1) + self.nodes[0].setmocktime(get_mocktime()) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + + def test_single_node_session_timeout(self, do_cycle_llmqs): + set_node_times(self.nodes, get_mocktime()) + isolate_node(self.nodes[3]) + rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1}) + rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex'] + rawtx = self.nodes[0].signrawtransaction(rawtx)['hex'] + txid = self.nodes[3].sendrawtransaction(rawtx) + time.sleep(2) # make sure signing is done on node 2 (it's async) + # Make the signing session for the IS lock timeout on node 3 + set_mocktime(get_mocktime() + 61) + set_node_times(self.nodes, get_mocktime()) + time.sleep(2) # make sure Cleanup() is called + reconnect_isolated_node(self.nodes[3], 0) + self.wait_for_mnauth(self.nodes[3], 2) + self.nodes[0].sendrawtransaction(rawtx) + # Make sure nodes 1 and 2 received the TX + self.wait_for_tx(txid, self.nodes[1]) + self.wait_for_tx(txid, self.nodes[2]) + # Make sure signing is done on nodes 1 and 2 (it's async) + time.sleep(5) + # node 3 fully reconnected but the signing session is already timed out on it, so no IS lock + self.wait_for_instantlock(txid, self.nodes[0], False, 1, do_assert=True) + if 
do_cycle_llmqs: + self.cycle_llmqs() + self.wait_for_instantlock(txid, self.nodes[0], False, 5, do_assert=True) + # Make node 0 consider the TX as safe + set_mocktime(get_mocktime() + 10 * 60 + 1) + self.nodes[0].setmocktime(get_mocktime()) + block = self.nodes[0].generate(1)[0] + self.wait_for_chainlocked_block_all_nodes(block) + +if __name__ == '__main__': + LLMQ_IS_RetroactiveSigning().main() diff --git a/qa/rpc-tests/llmq-signing.py b/qa/rpc-tests/llmq-signing.py index 02e9637988b1e..11740e4322f63 100644 --- a/qa/rpc-tests/llmq-signing.py +++ b/qa/rpc-tests/llmq-signing.py @@ -17,7 +17,7 @@ class LLMQSigningTest(DashTestFramework): def __init__(self): - super().__init__(6, 5, [], fast_dip3_enforcement=True) + super().__init__(6, 5, fast_dip3_enforcement=True) def run_test(self): diff --git a/qa/rpc-tests/llmq-simplepose.py b/qa/rpc-tests/llmq-simplepose.py index 93f00514c9634..97ca90353d5c7 100644 --- a/qa/rpc-tests/llmq-simplepose.py +++ b/qa/rpc-tests/llmq-simplepose.py @@ -16,7 +16,7 @@ class LLMQSimplePoSeTest(DashTestFramework): def __init__(self): - super().__init__(6, 5, [], fast_dip3_enforcement=True) + super().__init__(6, 5, fast_dip3_enforcement=True) def run_test(self): diff --git a/qa/rpc-tests/p2p-autoinstantsend.py b/qa/rpc-tests/p2p-autoinstantsend.py index 4f3ca51dbaa95..d605471da5e40 100755 --- a/qa/rpc-tests/p2p-autoinstantsend.py +++ b/qa/rpc-tests/p2p-autoinstantsend.py @@ -23,7 +23,7 @@ class AutoInstantSendTest(BiblepayTestFramework): def __init__(self): - super().__init__(8, 5, [], fast_dip3_enforcement=True) + super().__init__(8, 5, fast_dip3_enforcement=True) # set sender, receiver, isolated nodes self.receiver_idx = 1 self.sender_idx = 2 diff --git a/qa/rpc-tests/p2p-instantsend.py b/qa/rpc-tests/p2p-instantsend.py index 71c6db872548e..3488353c134e4 100755 --- a/qa/rpc-tests/p2p-instantsend.py +++ b/qa/rpc-tests/p2p-instantsend.py @@ -14,7 +14,7 @@ class InstantSendTest(BiblepayTestFramework): def __init__(self): - super().__init__(9, 
5, [], fast_dip3_enforcement=True) + super().__init__(9, 5, fast_dip3_enforcement=True) # set sender, receiver, isolated nodes self.isolated_idx = 1 self.receiver_idx = 2 diff --git a/qa/rpc-tests/test_framework/test_framework.py b/qa/rpc-tests/test_framework/test_framework.py index dc6991deeff2e..8ef8b030794b2 100755 --- a/qa/rpc-tests/test_framework/test_framework.py +++ b/qa/rpc-tests/test_framework/test_framework.py @@ -2,8 +2,7 @@ # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. - -# Base class for RPC testing +"""Base class for RPC testing.""" import logging import optparse @@ -12,9 +11,12 @@ import shutil import tempfile import traceback +from concurrent.futures import ThreadPoolExecutor from time import time, sleep +from test_framework.mininode import wait_until from .util import ( + assert_equal, initialize_chain, start_node, start_nodes, @@ -35,8 +37,9 @@ set_mocktime, set_node_times, p2p_port, - satoshi_round -) + satoshi_round, + wait_to_sync, + copy_datadir) from .authproxy import JSONRPCException @@ -54,7 +57,7 @@ def add_options(self, parser): pass def setup_chain(self): - print("Initializing test directory "+self.options.tmpdir) + self.log.info("Initializing test directory "+self.options.tmpdir) if self.setup_clean_chain: initialize_chain_clean(self.options.tmpdir, self.num_nodes) set_genesis_mocktime() @@ -117,15 +120,17 @@ def main(self): parser = optparse.OptionParser(usage="%prog [options]") parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true", - help="Leave biblepayds and test.* datadir on exit or error") + help="Leave dashds and test.* datadir on exit or error") parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true", - help="Don't stop biblepayds after the test execution") + help="Don't stop dashds after the test execution") 
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"), - help="Source directory containing biblepayd/biblepay-cli (default: %default)") + help="Source directory containing dashd/dash-cli (default: %default)") parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"), help="Directory for caching pregenerated datadirs") parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"), help="Root directory for datadirs") + parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO", + help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true", help="Print out all RPC calls as they are made") parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int', @@ -138,9 +143,6 @@ def main(self): # backup dir variable for removal at cleanup self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed) - if self.options.trace_rpc: - logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) - if self.options.coveragedir: enable_coverage(self.options.coveragedir) @@ -150,41 +152,45 @@ def main(self): check_json_precision() + # Set up temp directory and start logging + os.makedirs(self.options.tmpdir, exist_ok=False) + self._start_logging() + success = False + try: - os.makedirs(self.options.tmpdir, exist_ok=False) self.setup_chain() self.setup_network() self.run_test() success = True except JSONRPCException as e: - print("JSONRPC error: "+e.error['message']) - traceback.print_tb(sys.exc_info()[2]) + 
self.log.exception("JSONRPC error") except AssertionError as e: - print("Assertion failed: " + str(e)) - traceback.print_tb(sys.exc_info()[2]) + self.log.exception("Assertion failed") except KeyError as e: - print("key not found: "+ str(e)) - traceback.print_tb(sys.exc_info()[2]) + self.log.exception("Key error") except Exception as e: - print("Unexpected exception caught during testing: " + repr(e)) - traceback.print_tb(sys.exc_info()[2]) + self.log.exception("Unexpected exception caught during testing") except KeyboardInterrupt as e: - print("Exiting after " + repr(e)) + self.log.warning("Exiting after keyboard interrupt") if not self.options.noshutdown: - print("Stopping nodes") - stop_nodes(self.nodes) + self.log.info("Stopping nodes") + try: + stop_nodes(self.nodes) + except BaseException as e: + success = False + self.log.exception("Unexpected exception caught during shutdown") else: - print("Note: biblepayds were not stopped and may still be running") + self.log.info("Note: dashds were not stopped and may still be running") if not self.options.nocleanup and not self.options.noshutdown and success: - print("Cleaning up") + self.log.info("Cleaning up") shutil.rmtree(self.options.tmpdir) if not os.listdir(self.options.root): os.rmdir(self.options.root) else: - print("Not cleaning up dir %s" % self.options.tmpdir) + self.log.warning("Not cleaning up dir %s" % self.options.tmpdir) if os.getenv("PYTHON_DEBUG", ""): # Dump the end of the debug logs, to aid in debugging rare # travis failures. @@ -196,26 +202,58 @@ def main(self): from collections import deque print("".join(deque(open(f), MAX_LINES_TO_PRINT))) if success: - print("Tests successful") + self.log.info("Tests successful") sys.exit(0) else: - print("Failed") + self.log.error("Test failed. 
Test logging available at %s/test_framework.log", self.options.tmpdir) + logging.shutdown() sys.exit(1) + def _start_logging(self): + # Add logger and logging handlers + self.log = logging.getLogger('TestFramework') + self.log.setLevel(logging.DEBUG) + # Create file handler to log all messages + fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log') + fh.setLevel(logging.DEBUG) + # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel. + ch = logging.StreamHandler(sys.stdout) + # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int + ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper() + ch.setLevel(ll) + # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted) + formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S') + fh.setFormatter(formatter) + ch.setFormatter(formatter) + # add the handlers to the logger + self.log.addHandler(fh) + self.log.addHandler(ch) -MASTERNODE_COLLATERAL = 1000 + if self.options.trace_rpc: + rpc_logger = logging.getLogger("BitcoinRPC") + rpc_logger.setLevel(logging.DEBUG) + rpc_handler = logging.StreamHandler(sys.stdout) + rpc_handler.setLevel(logging.DEBUG) + rpc_logger.addHandler(rpc_handler) -class MasternodeInfo: - def __init__(self, key, blsKey, collateral_id, collateral_out): - self.key = key - self.blsKey = blsKey - self.collateral_id = collateral_id - self.collateral_out = collateral_out +MASTERNODE_COLLATERAL = 1000 -class BiblepayTestFramework(BitcoinTestFramework): - def __init__(self, num_nodes, masterodes_count, extra_args): +class MasternodeInfo: + def __init__(self, proTxHash, ownerAddr, votingAddr, pubKeyOperator, keyOperator, collateral_address, collateral_txid, collateral_vout): + 
self.proTxHash = proTxHash + self.ownerAddr = ownerAddr + self.votingAddr = votingAddr + self.pubKeyOperator = pubKeyOperator + self.keyOperator = keyOperator + self.collateral_address = collateral_address + self.collateral_txid = collateral_txid + self.collateral_vout = collateral_vout + + +class DashTestFramework(BitcoinTestFramework): + def __init__(self, num_nodes, masterodes_count, extra_args=None, fast_dip3_enforcement=False): super().__init__() self.mn_count = masterodes_count self.num_nodes = num_nodes @@ -223,76 +261,155 @@ def __init__(self, num_nodes, masterodes_count, extra_args): self.setup_clean_chain = True self.is_network_split = False # additional args + if extra_args is None: + extra_args = [[]] * num_nodes + assert_equal(len(extra_args), num_nodes) self.extra_args = extra_args + self.extra_args[0] += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"] + self.fast_dip3_enforcement = fast_dip3_enforcement + if fast_dip3_enforcement: + for i in range(0, num_nodes): + self.extra_args[i] += ["-dip3params=30:50"] + def create_simple_node(self): idx = len(self.nodes) - args = self.extra_args - self.nodes.append(start_node(idx, self.options.tmpdir, - args)) + args = self.extra_args[idx] + self.nodes.append(start_node(idx, self.options.tmpdir, args)) for i in range(0, idx): connect_nodes(self.nodes[i], idx) - def get_mnconf_file(self): - return os.path.join(self.options.tmpdir, "node0/regtest/masternode.conf") - def prepare_masternodes(self): for idx in range(0, self.mn_count): - key = self.nodes[0].masternode("genkey") - blsKey = self.nodes[0].bls('generate')['secret'] - address = self.nodes[0].getnewaddress() - txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL) - txrow = self.nodes[0].getrawtransaction(txid, True) - collateral_vout = 0 - for vout_idx in range(0, len(txrow["vout"])): - vout = txrow["vout"][vout_idx] - if vout["value"] == MASTERNODE_COLLATERAL: - collateral_vout = vout_idx - self.nodes[0].lockunspent(False, 
- [{"txid": txid, "vout": collateral_vout}]) - self.mninfo.append(MasternodeInfo(key, blsKey, txid, collateral_vout)) - - def write_mn_config(self): - conf = self.get_mnconf_file() - f = open(conf, 'a') + self.prepare_masternode(idx) + + def prepare_masternode(self, idx): + bls = self.nodes[0].bls('generate') + address = self.nodes[0].getnewaddress() + txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL) + + txraw = self.nodes[0].getrawtransaction(txid, True) + collateral_vout = 0 + for vout_idx in range(0, len(txraw["vout"])): + vout = txraw["vout"][vout_idx] + if vout["value"] == MASTERNODE_COLLATERAL: + collateral_vout = vout_idx + self.nodes[0].lockunspent(False, [{'txid': txid, 'vout': collateral_vout}]) + + # send to same address to reserve some funds for fees + self.nodes[0].sendtoaddress(address, 0.001) + + ownerAddr = self.nodes[0].getnewaddress() + votingAddr = self.nodes[0].getnewaddress() + rewardsAddr = self.nodes[0].getnewaddress() + + port = p2p_port(len(self.nodes) + idx) + if (idx % 2) == 0: + self.nodes[0].lockunspent(True, [{'txid': txid, 'vout': collateral_vout}]) + proTxHash = self.nodes[0].protx('register_fund', address, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address) + else: + self.nodes[0].generate(1) + proTxHash = self.nodes[0].protx('register', txid, collateral_vout, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address) + self.nodes[0].generate(1) + + self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout)) + self.sync_all() + + def remove_mastermode(self, idx): + mn = self.mninfo[idx] + rawtx = self.nodes[0].createrawtransaction([{"txid": mn.collateral_txid, "vout": mn.collateral_vout}], {self.nodes[0].getnewaddress(): 999.9999}) + rawtx = self.nodes[0].signrawtransaction(rawtx) + self.nodes[0].sendrawtransaction(rawtx["hex"]) + self.nodes[0].generate(1) + self.sync_all() + 
self.mninfo.remove(mn) + + def prepare_datadirs(self): + # stop faucet node so that we can copy the datadir + stop_node(self.nodes[0], 0) + + start_idx = len(self.nodes) + for idx in range(0, self.mn_count): + copy_datadir(0, idx + start_idx, self.options.tmpdir) + + # restart faucet node + self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0]) + + def start_masternodes(self): + start_idx = len(self.nodes) + + for idx in range(0, self.mn_count): + self.nodes.append(None) + executor = ThreadPoolExecutor(max_workers=20) + + def do_start(idx): + args = ['-masternode=1', + '-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args[idx + start_idx] + node = start_node(idx + start_idx, self.options.tmpdir, args) + self.mninfo[idx].nodeIdx = idx + start_idx + self.mninfo[idx].node = node + self.nodes[idx + start_idx] = node + wait_to_sync(node, True) + + def do_connect(idx): + # Connect to the control node only, masternodes should take care of intra-quorum connections themselves + connect_nodes(self.mninfo[idx].node, 0) + + jobs = [] + + # start up nodes in parallel for idx in range(0, self.mn_count): - f.write("mn%d 127.0.0.1:%d %s %s %d\n" % (idx + 1, p2p_port(idx + 1), - self.mninfo[idx].key, - self.mninfo[idx].collateral_id, - self.mninfo[idx].collateral_out)) - f.close() + jobs.append(executor.submit(do_start, idx)) + + # wait for all nodes to start up + for job in jobs: + job.result() + jobs.clear() - def create_masternodes(self): + # connect nodes in parallel for idx in range(0, self.mn_count): - args = ['-externalip=127.0.0.1', '-masternode=1', - '-masternodeprivkey=%s' % self.mninfo[idx].key, - '-masternodeblsprivkey=%s' % self.mninfo[idx].blsKey] + self.extra_args - self.nodes.append(start_node(idx + 1, self.options.tmpdir, args)) - for i in range(0, idx + 1): - connect_nodes(self.nodes[i], idx + 1) + jobs.append(executor.submit(do_connect, idx)) + + # wait for all nodes to connect + for job in jobs: + job.result() + 
jobs.clear() + + sync_masternodes(self.nodes, True) + + executor.shutdown() def setup_network(self): self.nodes = [] # create faucet node for collateral and transactions - args = self.extra_args - self.nodes.append(start_node(0, self.options.tmpdir, args)) + self.nodes.append(start_node(0, self.options.tmpdir, self.extra_args[0])) required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1 while self.nodes[0].getbalance() < required_balance: set_mocktime(get_mocktime() + 1) set_node_times(self.nodes, get_mocktime()) self.nodes[0].generate(1) - # create masternodes - self.prepare_masternodes() - self.write_mn_config() - stop_node(self.nodes[0], 0) - args = ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"] + \ - self.extra_args - self.nodes[0] = start_node(0, self.options.tmpdir, - args) - self.create_masternodes() # create connected simple nodes for i in range(0, self.num_nodes - self.mn_count - 1): self.create_simple_node() + sync_masternodes(self.nodes, True) + + # activate DIP3 + if not self.fast_dip3_enforcement: + while self.nodes[0].getblockcount() < 500: + self.nodes[0].generate(10) + self.sync_all() + + # create masternodes + self.prepare_masternodes() + self.prepare_datadirs() + self.start_masternodes() + + # non-masternodes where disconnected from the control node during prepare_datadirs, + # let's reconnect them back to make sure they receive updates + num_simple_nodes = self.num_nodes - self.mn_count - 1 + for i in range(0, num_simple_nodes): + connect_nodes(self.nodes[i+1], 0) + set_mocktime(get_mocktime() + 1) set_node_times(self.nodes, get_mocktime()) self.nodes[0].generate(1) @@ -300,19 +417,73 @@ def setup_network(self): self.sync_all() set_mocktime(get_mocktime() + 1) set_node_times(self.nodes, get_mocktime()) - sync_masternodes(self.nodes, True) - for i in range(1, self.mn_count + 1): - res = self.nodes[0].masternode("start-alias", "mn%d" % i) - assert (res["result"] == 'successful') + mn_info = 
self.nodes[0].masternodelist("status") assert (len(mn_info) == self.mn_count) for status in mn_info.values(): assert (status == 'ENABLED') - def enforce_masternode_payments(self): - self.nodes[0].spork('SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT', 0) + def get_autois_bip9_status(self, node): + info = node.getblockchaininfo() + # we reuse the dip3 deployment + return info['bip9_softforks']['dip0003']['status'] + + def activate_autois_bip9(self, node): + # sync nodes periodically + # if we sync them too often, activation takes too many time + # if we sync them too rarely, nodes failed to update its state and + # bip9 status is not updated + # so, in this code nodes are synced once per 20 blocks + counter = 0 + sync_period = 10 + + while self.get_autois_bip9_status(node) == 'defined': + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + node.generate(1) + counter += 1 + if counter % sync_period == 0: + # sync nodes + self.sync_all() - def create_raw_trx(self, node_from, node_to, amount, min_inputs, max_inputs): + while self.get_autois_bip9_status(node) == 'started': + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + node.generate(1) + counter += 1 + if counter % sync_period == 0: + # sync nodes + self.sync_all() + + while self.get_autois_bip9_status(node) == 'locked_in': + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + node.generate(1) + counter += 1 + if counter % sync_period == 0: + # sync nodes + self.sync_all() + + # sync nodes + self.sync_all() + + assert(self.get_autois_bip9_status(node) == 'active') + + def get_autois_spork_state(self, node): + info = node.spork('active') + return info['SPORK_16_INSTANTSEND_AUTOLOCKS'] + + def set_autois_spork_state(self, node, state): + # Increment mocktime as otherwise nodes will not update sporks + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + if state: + value = 0 + else: + value = 4070908800 + 
node.spork('SPORK_16_INSTANTSEND_AUTOLOCKS', value) + + def create_raw_tx(self, node_from, node_to, amount, min_inputs, max_inputs): assert (min_inputs <= max_inputs) # fill inputs inputs = [] @@ -343,8 +514,9 @@ def create_raw_trx(self, node_from, node_to, amount, min_inputs, max_inputs): inputs[-1] = input last_amount = float(tx['amount']) - assert (len(inputs) > 0) - assert (in_amount > amount) + assert (len(inputs) >= min_inputs) + assert (len(inputs) <= max_inputs) + assert (in_amount >= amount) # fill outputs receiver_address = node_to.getnewaddress() change_address = node_from.getnewaddress() @@ -353,22 +525,225 @@ def create_raw_trx(self, node_from, node_to, amount, min_inputs, max_inputs): outputs[receiver_address] = satoshi_round(amount) outputs[change_address] = satoshi_round(in_amount - amount - fee) rawtx = node_from.createrawtransaction(inputs, outputs) - return node_from.signrawtransaction(rawtx) - - def wait_for_instantlock(self, txid, node): - # wait for instantsend locks - start = time() - locked = False - while True: - is_trx = node.gettransaction(txid) - if is_trx['instantlock']: - locked = True - break - if time() > start + 10: - break + ret = node_from.signrawtransaction(rawtx) + decoded = node_from.decoderawtransaction(ret['hex']) + ret = {**decoded, **ret} + return ret + + # sends regular instantsend with high fee + def send_regular_instantsend(self, sender, receiver, check_fee = True): + receiver_addr = receiver.getnewaddress() + txid = sender.instantsendtoaddress(receiver_addr, 1.0) + if (check_fee): + MIN_FEE = satoshi_round(-0.0001) + fee = sender.gettransaction(txid)['fee'] + expected_fee = MIN_FEE * len(sender.getrawtransaction(txid, True)['vin']) + assert_equal(fee, expected_fee) + return self.wait_for_instantlock(txid, sender) + + # sends simple tx, it should become locked if autolocks are allowed + def send_simple_tx(self, sender, receiver): + raw_tx = self.create_raw_tx(sender, receiver, 1.0, 1, 4) + txid = 
self.nodes[0].sendrawtransaction(raw_tx['hex']) + self.sync_all() + return self.wait_for_instantlock(txid, sender) + + # sends complex tx, it should never become locked for old instentsend + def send_complex_tx(self, sender, receiver): + raw_tx = self.create_raw_tx(sender, receiver, 1.0, 5, 100) + txid = sender.sendrawtransaction(raw_tx['hex']) + self.sync_all() + return self.wait_for_instantlock(txid, sender) + + def wait_for_tx(self, txid, node, expected=True, timeout=15): + def check_tx(): + try: + return node.getrawtransaction(txid) + except: + return False + w = wait_until(check_tx, timeout=timeout, sleep=0.5) + if not w and expected: + raise AssertionError("wait_for_instantlock failed") + elif w and not expected: + raise AssertionError("waiting unexpectedly succeeded") + + def wait_for_instantlock(self, txid, node, expected=True, timeout=15, do_assert=False): + def check_instantlock(): + try: + return node.getrawtransaction(txid, True)["instantlock"] + except: + return False + w = wait_until(check_instantlock, timeout=timeout, sleep=0.1) + if not w and expected: + if do_assert: + raise AssertionError("wait_for_instantlock failed") + else: + return False + elif w and not expected: + if do_assert: + raise AssertionError("waiting unexpectedly succeeded") + else: + return False + return True + + def wait_for_chainlocked_block(self, node, block_hash, expected=True, timeout=15): + def check_chainlocked_block(): + try: + block = node.getblock(block_hash) + return block["confirmations"] > 0 and block["chainlock"] + except: + return False + w = wait_until(check_chainlocked_block, timeout=timeout, sleep=0.1) + if not w and expected: + raise AssertionError("wait_for_chainlocked_block failed") + elif w and not expected: + raise AssertionError("waiting unexpectedly succeeded") + + def wait_for_chainlocked_block_all_nodes(self, block_hash, timeout=15): + for node in self.nodes: + self.wait_for_chainlocked_block(node, block_hash, timeout=timeout) + + def 
wait_for_sporks_same(self, timeout=30): + st = time() + while time() < st + timeout: + if self.check_sporks_same(): + return + sleep(0.5) + raise AssertionError("wait_for_sporks_same timed out") + + def check_sporks_same(self): + sporks = self.nodes[0].spork('show') + for node in self.nodes[1:]: + sporks2 = node.spork('show') + if sporks != sporks2: + return False + return True + + def wait_for_quorum_phase(self, phase, check_received_messages, check_received_messages_count, timeout=30): + t = time() + while time() - t < timeout: + all_ok = True + for mn in self.mninfo: + s = mn.node.quorum("dkgstatus")["session"] + if "llmq_5_60" not in s: + all_ok = False + break + s = s["llmq_5_60"] + if "phase" not in s: + all_ok = False + break + if s["phase"] != phase: + all_ok = False + break + if check_received_messages is not None: + if s[check_received_messages] < check_received_messages_count: + all_ok = False + break + if all_ok: + return + sleep(0.1) + raise AssertionError("wait_for_quorum_phase timed out") + + def wait_for_quorum_commitment(self, timeout = 15): + t = time() + while time() - t < timeout: + all_ok = True + for node in self.nodes: + s = node.quorum("dkgstatus") + if "minableCommitments" not in s: + all_ok = False + break + s = s["minableCommitments"] + if "llmq_5_60" not in s: + all_ok = False + break + if all_ok: + return sleep(0.1) - return locked + raise AssertionError("wait_for_quorum_commitment timed out") + + def mine_quorum(self, expected_contributions=5, expected_complaints=0, expected_justifications=0, expected_commitments=5): + quorums = self.nodes[0].quorum("list") + + # move forward to next DKG + skip_count = 24 - (self.nodes[0].getblockcount() % 24) + if skip_count != 0: + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + self.nodes[0].generate(skip_count) + sync_blocks(self.nodes) + + # Make sure all reached phase 1 (init) + self.wait_for_quorum_phase(1, None, 0) + # Give nodes some time to connect to neighbors 
+ sleep(2) + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + self.nodes[0].generate(2) + sync_blocks(self.nodes) + + # Make sure all reached phase 2 (contribute) and received all contributions + self.wait_for_quorum_phase(2, "receivedContributions", expected_contributions) + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + self.nodes[0].generate(2) + sync_blocks(self.nodes) + + # Make sure all reached phase 3 (complain) and received all complaints + self.wait_for_quorum_phase(3, "receivedComplaints", expected_complaints) + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + self.nodes[0].generate(2) + sync_blocks(self.nodes) + + # Make sure all reached phase 4 (justify) + self.wait_for_quorum_phase(4, "receivedJustifications", expected_justifications) + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + self.nodes[0].generate(2) + sync_blocks(self.nodes) + + # Make sure all reached phase 5 (commit) + self.wait_for_quorum_phase(5, "receivedPrematureCommitments", expected_commitments) + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + self.nodes[0].generate(2) + sync_blocks(self.nodes) + + # Make sure all reached phase 6 (mining) + self.wait_for_quorum_phase(6, None, 0) + + # Wait for final commitment + self.wait_for_quorum_commitment() + + # mine the final commitment + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + self.nodes[0].generate(1) + while quorums == self.nodes[0].quorum("list"): + sleep(2) + set_mocktime(get_mocktime() + 1) + set_node_times(self.nodes, get_mocktime()) + self.nodes[0].generate(1) + sync_blocks(self.nodes) + new_quorum = self.nodes[0].quorum("list", 1)["llmq_5_60"][0] + + # Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligable for signing sessions + self.nodes[0].generate(8) + + sync_blocks(self.nodes) + + return new_quorum + 
def wait_for_mnauth(self, node, count, timeout=10): + def test(): + pi = node.getpeerinfo() + c = 0 + for p in pi: + if "verified_proregtx_hash" in p and p["verified_proregtx_hash"] != "": + c += 1 + return c >= count + assert wait_until(test, timeout=timeout) # Test framework for doing p2p comparison testing, which sets up some bitcoind # binaries: @@ -385,15 +760,15 @@ def __init__(self): def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", - default=os.getenv("BIBLEPAYD", "biblepayd"), - help="bitcoind binary to test") + default=os.getenv("BITCOIND", "dashd"), + help="dashd binary to test") parser.add_option("--refbinary", dest="refbinary", - default=os.getenv("BIBLEPAYD", "biblepayd"), - help="bitcoind binary to use for reference nodes (if any)") + default=os.getenv("BITCOIND", "dashd"), + help="dashd binary to use for reference nodes (if any)") def setup_network(self): self.nodes = start_nodes( self.num_nodes, self.options.tmpdir, - extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes, + extra_args=[['-whitelist=127.0.0.1']] * self.num_nodes, binary=[self.options.testbinary] + [self.options.refbinary]*(self.num_nodes-1)) diff --git a/src/llmq/quorums.cpp b/src/llmq/quorums.cpp index ec938f0a1c91c..1bf177d26648d 100644 --- a/src/llmq/quorums.cpp +++ b/src/llmq/quorums.cpp @@ -44,7 +44,9 @@ CQuorum::~CQuorum() { // most likely the thread is already done stopCachePopulatorThread = true; - if (cachePopulatorThread.joinable()) { + // watch out to not join the thread when we're called from inside the thread, which might happen on shutdown. This + // is because on shutdown the thread is the last owner of the shared CQuorum instance and thus the destroyer of it. 
+ if (cachePopulatorThread.joinable() && cachePopulatorThread.get_id() != std::this_thread::get_id()) { cachePopulatorThread.join(); } } diff --git a/src/llmq/quorums_chainlocks.cpp b/src/llmq/quorums_chainlocks.cpp index cdb5d99ac14c2..bd4a83ee42855 100644 --- a/src/llmq/quorums_chainlocks.cpp +++ b/src/llmq/quorums_chainlocks.cpp @@ -347,15 +347,23 @@ void CChainLocksHandler::TrySignChainTip() void CChainLocksHandler::SyncTransaction(const CTransaction& tx, const CBlockIndex* pindex, int posInBlock) { - if (!masternodeSync.IsBlockchainSynced()) { - return; - } - bool handleTx = true; if (tx.IsCoinBase() || tx.vin.empty()) { handleTx = false; } + if (!masternodeSync.IsBlockchainSynced()) { + if (handleTx && posInBlock == CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK) { + auto info = mempool.info(tx.GetHash()); + if (!info.tx) { + return; + } + LOCK(cs); + txFirstSeenTime.emplace(tx.GetHash(), info.nTime); + } + return; + } + LOCK(cs); if (handleTx) { diff --git a/src/llmq/quorums_instantsend.cpp b/src/llmq/quorums_instantsend.cpp index eeb8de7f29ccf..6fd96826e0131 100644 --- a/src/llmq/quorums_instantsend.cpp +++ b/src/llmq/quorums_instantsend.cpp @@ -374,7 +374,7 @@ void CInstantSendManager::InterruptWorkerThread() workInterrupt(); } -bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Params& params) +bool CInstantSendManager::ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params) { if (!IsNewInstantSendEnabled()) { return true; @@ -405,11 +405,17 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par g_connman->RelayInvFiltered(inv, tx, LLMQS_PROTO_VERSION); } - if (IsConflicted(tx)) { + auto conflictingLock = GetConflictingLock(tx); + if (conflictingLock) { + auto islockHash = ::SerializeHash(*conflictingLock); + LogPrintf("CInstantSendManager::%s -- txid=%s: conflicts with islock %s, txid=%s\n", __func__, + tx.GetHash().ToString(), islockHash.ToString(), 
conflictingLock->txid.ToString()); return false; } if (!CheckCanLock(tx, true, params)) { + LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: CheckCanLock returned false\n", __func__, + tx.GetHash().ToString()); return false; } @@ -424,7 +430,7 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par uint256 otherTxHash; if (quorumSigningManager->GetVoteForId(llmqType, id, otherTxHash)) { if (otherTxHash != tx.GetHash()) { - LogPrintf("CInstantSendManager::%s -- txid=%s: input %s is conflicting with islock %s\n", __func__, + LogPrintf("CInstantSendManager::%s -- txid=%s: input %s is conflicting with previous vote for tx %s\n", __func__, tx.GetHash().ToString(), in.prevout.ToStringShort(), otherTxHash.ToString()); return false; } @@ -433,19 +439,28 @@ bool CInstantSendManager::ProcessTx(const CTransaction& tx, const Consensus::Par // don't even try the actual signing if any input is conflicting if (quorumSigningManager->IsConflicting(llmqType, id, tx.GetHash())) { + LogPrintf("CInstantSendManager::%s -- txid=%s: quorumSigningManager->IsConflicting returned true. id=%s\n", __func__, + tx.GetHash().ToString(), id.ToString()); return false; } } - if (alreadyVotedCount == ids.size()) { + if (!allowReSigning && alreadyVotedCount == ids.size()) { + LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: already voted on all inputs, bailing out\n", __func__, + tx.GetHash().ToString()); return true; } + LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: trying to vote on %d inputs\n", __func__, + tx.GetHash().ToString(), tx.vin.size()); + for (size_t i = 0; i < tx.vin.size(); i++) { auto& in = tx.vin[i]; auto& id = ids[i]; inputRequestIds.emplace(id); - if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash())) { - LogPrintf("CInstantSendManager::%s -- txid=%s: voted on input %s with id %s\n", __func__, + LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: trying to vote on input %s with id %s. 
allowReSigning=%d\n", __func__, + tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString(), allowReSigning); + if (quorumSigningManager->AsyncSignIfMember(llmqType, id, tx.GetHash(), allowReSigning)) { + LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s: voted on input %s with id %s\n", __func__, tx.GetHash().ToString(), in.prevout.ToStringShort(), id.ToString()); } } @@ -1000,6 +1015,10 @@ void CInstantSendManager::SyncTransaction(const CTransaction& tx, const CBlockIn return; } + // This is different on develop as allowReSigning is passed in from the caller. In 0.14.0.x, we have to figure this out + // here to mimic develop. + bool allowReSigning = !inMempool && !isDisconnect; + uint256 islockHash; { LOCK(cs); @@ -1022,7 +1041,7 @@ void CInstantSendManager::SyncTransaction(const CTransaction& tx, const CBlockIn bool chainlocked = pindex && chainLocksHandler->HasChainLock(pindex->nHeight, pindex->GetBlockHash()); if (islockHash.IsNull() && !chainlocked) { - ProcessTx(tx, Params().GetConsensus()); + ProcessTx(tx, allowReSigning, Params().GetConsensus()); } LOCK(cs); @@ -1054,6 +1073,9 @@ void CInstantSendManager::AddNonLockedTx(const CTransactionRef& tx) nonLockedTxsByInputs.emplace(in.prevout.hash, std::make_pair(in.prevout.n, tx->GetHash())); } } + + LogPrint("instantsend", "CInstantSendManager::%s -- txid=%s\n", __func__, + tx->GetHash().ToString()); } void CInstantSendManager::RemoveNonLockedTx(const uint256& txid, bool retryChildren) @@ -1066,10 +1088,12 @@ void CInstantSendManager::RemoveNonLockedTx(const uint256& txid, bool retryChild } auto& info = it->second; + size_t retryChildrenCount = 0; if (retryChildren) { // TX got locked, so we can retry locking children for (auto& childTxid : info.children) { pendingRetryTxs.emplace(childTxid); + retryChildrenCount++; } } @@ -1096,6 +1120,9 @@ void CInstantSendManager::RemoveNonLockedTx(const uint256& txid, bool retryChild } nonLockedTxs.erase(it); + + LogPrint("instantsend", 
"CInstantSendManager::%s -- txid=%s, retryChildren=%d, retryChildrenCount=%d\n", __func__, + txid.ToString(), retryChildren, retryChildrenCount); } void CInstantSendManager::RemoveConflictedTx(const CTransaction& tx) @@ -1398,7 +1425,7 @@ bool CInstantSendManager::ProcessPendingRetryLockTxs() tx->GetHash().ToString()); } - ProcessTx(*tx, Params().GetConsensus()); + ProcessTx(*tx, false, Params().GetConsensus()); retryCount++; } diff --git a/src/llmq/quorums_instantsend.h b/src/llmq/quorums_instantsend.h index bb696b4adef73..11ff8583d1e09 100644 --- a/src/llmq/quorums_instantsend.h +++ b/src/llmq/quorums_instantsend.h @@ -120,7 +120,7 @@ class CInstantSendManager : public CRecoveredSigsListener void InterruptWorkerThread(); public: - bool ProcessTx(const CTransaction& tx, const Consensus::Params& params); + bool ProcessTx(const CTransaction& tx, bool allowReSigning, const Consensus::Params& params); bool CheckCanLock(const CTransaction& tx, bool printDebug, const Consensus::Params& params); bool CheckCanLock(const COutPoint& outpoint, bool printDebug, const uint256& txHash, CAmount* retValue, const Consensus::Params& params); bool IsLocked(const uint256& txHash); diff --git a/src/llmq/quorums_signing.cpp b/src/llmq/quorums_signing.cpp index 39fad65af763b..d8a0f9e07e156 100644 --- a/src/llmq/quorums_signing.cpp +++ b/src/llmq/quorums_signing.cpp @@ -473,7 +473,8 @@ void CSigningManager::ProcessMessageRecoveredSig(CNode* pfrom, const CRecoveredS return; } - LogPrint("llmq", "CSigningManager::%s -- signHash=%s, node=%d\n", __func__, CLLMQUtils::BuildSignHash(recoveredSig).ToString(), pfrom->id); + LogPrint("llmq", "CSigningManager::%s -- signHash=%s, id=%s, msgHash=%s, node=%d\n", __func__, + CLLMQUtils::BuildSignHash(recoveredSig).ToString(), recoveredSig.id.ToString(), recoveredSig.msgHash.ToString(), pfrom->GetId()); LOCK(cs); pendingRecoveredSigs[pfrom->id].emplace_back(recoveredSig); @@ -742,7 +743,7 @@ void 
CSigningManager::UnregisterRecoveredSigsListener(CRecoveredSigsListener* l) recoveredSigsListeners.erase(itRem, recoveredSigsListeners.end()); } -bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash) +bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign) { auto& params = Params().GetConsensus().llmqs.at(llmqType); @@ -753,24 +754,31 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint { LOCK(cs); - if (db.HasVotedOnId(llmqType, id)) { + bool hasVoted = db.HasVotedOnId(llmqType, id); + if (hasVoted) { uint256 prevMsgHash; db.GetVoteForId(llmqType, id, prevMsgHash); if (msgHash != prevMsgHash) { LogPrintf("CSigningManager::%s -- already voted for id=%s and msgHash=%s. Not voting on conflicting msgHash=%s\n", __func__, id.ToString(), prevMsgHash.ToString(), msgHash.ToString()); + return false; + } else if (allowReSign) { + LogPrint("llmq", "CSigningManager::%s -- already voted for id=%s and msgHash=%s. Resigning!\n", __func__, + id.ToString(), prevMsgHash.ToString()); } else { LogPrint("llmq", "CSigningManager::%s -- already voted for id=%s and msgHash=%s. 
Not voting again.\n", __func__, id.ToString(), prevMsgHash.ToString()); + return false; } - return false; } if (db.HasRecoveredSigForId(llmqType, id)) { // no need to sign it if we already have a recovered sig return true; } - db.WriteVoteForId(llmqType, id, msgHash); + if (!hasVoted) { + db.WriteVoteForId(llmqType, id, msgHash); + } } int tipHeight; @@ -795,6 +803,10 @@ bool CSigningManager::AsyncSignIfMember(Consensus::LLMQType llmqType, const uint return false; } + if (allowReSign) { + // make us re-announce all known shares (other nodes might have run into a timeout) + quorumSigSharesManager->ForceReAnnouncement(quorum, llmqType, id, msgHash); + } quorumSigSharesManager->AsyncSign(quorum, id, msgHash); return true; diff --git a/src/llmq/quorums_signing.h b/src/llmq/quorums_signing.h index c4c5343032fc7..92d18e4af0d42 100644 --- a/src/llmq/quorums_signing.h +++ b/src/llmq/quorums_signing.h @@ -167,7 +167,7 @@ class CSigningManager void RegisterRecoveredSigsListener(CRecoveredSigsListener* l); void UnregisterRecoveredSigsListener(CRecoveredSigsListener* l); - bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash); + bool AsyncSignIfMember(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash, bool allowReSign = false); bool HasRecoveredSig(Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash); bool HasRecoveredSigForId(Consensus::LLMQType llmqType, const uint256& id); bool HasRecoveredSigForSession(const uint256& signHash); diff --git a/src/llmq/quorums_signing_shares.cpp b/src/llmq/quorums_signing_shares.cpp index b5f7284605f4a..c1632500ce3c2 100644 --- a/src/llmq/quorums_signing_shares.cpp +++ b/src/llmq/quorums_signing_shares.cpp @@ -82,6 +82,13 @@ void CSigSharesInv::Set(uint16_t quorumMember, bool v) inv[quorumMember] = v; } +void CSigSharesInv::SetAll(bool v) +{ + for (size_t i = 0; i < inv.size(); i++) { + inv[i] = v; + } +} + std::string CBatchedSigShares::ToInvString() const { 
CSigSharesInv inv; @@ -679,7 +686,7 @@ void CSigSharesManager::ProcessSigShare(NodeId nodeId, const CSigShare& sigShare sigSharesToAnnounce.Add(sigShare.GetKey(), true); // Update the time we've seen the last sigShare - timeSeenForSessions[sigShare.GetSignHash()] = GetTimeMillis(); + timeSeenForSessions[sigShare.GetSignHash()] = GetAdjustedTime(); if (!quorumNodes.empty()) { // don't announce and wait for other nodes to request this share and directly send it to them @@ -778,7 +785,7 @@ void CSigSharesManager::CollectSigSharesToRequest(std::unordered_mapqc.quorumHash, id, msgHash); + auto sigs = sigShares.GetAllForSignHash(signHash); + if (sigs) { + for (auto& p : *sigs) { + // re-announce every sigshare to every node + sigSharesToAnnounce.Add(std::make_pair(signHash, p.first), true); + } + } + for (auto& p : nodeStates) { + CSigSharesNodeState& nodeState = p.second; + auto session = nodeState.GetSessionBySignHash(signHash); + if (!session) { + continue; + } + // pretend that the other node doesn't know about any shares so that we re-announce everything + session->knows.SetAll(false); + // we need to use a new session id as we don't know if the other node has run into a timeout already + session->sendSessionId = (uint32_t)-1; + } +} + void CSigSharesManager::HandleNewRecoveredSig(const llmq::CRecoveredSig& recoveredSig) { LOCK(cs); diff --git a/src/llmq/quorums_signing_shares.h b/src/llmq/quorums_signing_shares.h index 654f88268f0d3..340c8ea07b28b 100644 --- a/src/llmq/quorums_signing_shares.h +++ b/src/llmq/quorums_signing_shares.h @@ -104,6 +104,7 @@ class CSigSharesInv void Init(size_t size); bool IsSet(uint16_t quorumMember) const; void Set(uint16_t quorumMember, bool v); + void SetAll(bool v); void Merge(const CSigSharesInv& inv2); size_t CountSet() const; @@ -329,8 +330,8 @@ class CSigSharesNodeState class CSigSharesManager : public CRecoveredSigsListener { - static const int64_t SESSION_NEW_SHARES_TIMEOUT = 60 * 1000; - static const int64_t 
SIG_SHARE_REQUEST_TIMEOUT = 5 * 1000; + static const int64_t SESSION_NEW_SHARES_TIMEOUT = 60; + static const int64_t SIG_SHARE_REQUEST_TIMEOUT = 5; // we try to keep total message size below 10k const size_t MAX_MSGS_CNT_QSIGSESANN = 100; @@ -377,6 +378,7 @@ class CSigSharesManager : public CRecoveredSigsListener void AsyncSign(const CQuorumCPtr& quorum, const uint256& id, const uint256& msgHash); void Sign(const CQuorumCPtr& quorum, const uint256& id, const uint256& msgHash); + void ForceReAnnouncement(const CQuorumCPtr& quorum, Consensus::LLMQType llmqType, const uint256& id, const uint256& msgHash); void HandleNewRecoveredSig(const CRecoveredSig& recoveredSig); diff --git a/src/net.cpp b/src/net.cpp index 254f5a802b340..fe0d7b46dcb12 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -696,6 +696,11 @@ void CNode::copyStats(CNodeStats &stats) // Leave string empty if addrLocal invalid (not filled in yet) CService addrLocalUnlocked = GetAddrLocal(); stats.addrLocal = addrLocalUnlocked.IsValid() ? 
addrLocalUnlocked.ToString() : ""; + + { + LOCK(cs_mnauth); + X(verifiedProRegTxHash); + } } #undef X diff --git a/src/net.h b/src/net.h index 10ac801ce8623..5a41face96b36 100644 --- a/src/net.h +++ b/src/net.h @@ -660,6 +660,8 @@ class CNodeStats double dMinPing; std::string addrLocal; CAddress addr; + // In case this is a verified MN, this value is the proTx of the MN + uint256 verifiedProRegTxHash; }; diff --git a/src/net_processing.cpp b/src/net_processing.cpp index c9dd3733e8a01..ecf8203f91961 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1390,10 +1390,10 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam } } // release cs_main - if (it != pfrom->vRecvGetData.end()) { + if (it != pfrom->vRecvGetData.end() && !pfrom->fPauseSend) { const CInv &inv = *it; - it++; if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK) { + it++; ProcessGetBlockData(pfrom, consensusParams, inv, connman, interruptMsgProc); } } diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 6e24a5fd0774e..c280ef1370aa3 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -80,6 +80,9 @@ UniValue getpeerinfo(const JSONRPCRequest& request) " \"addr\":\"host:port\", (string) The ip address and port of the peer\n" " \"addrlocal\":\"ip:port\", (string) local address\n" " \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services offered\n" + " \"verified_proregtx_hash\": h, (hex) Only present when the peer is a masternode and succesfully\n" + " autheticated via MNAUTH. 
In this case, this field contains the\n" + " protx hash of the masternode\n" " \"relaytxes\":true|false, (boolean) Whether peer has asked us to relay transactions to it\n" " \"lastsend\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last send\n" " \"lastrecv\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last receive\n" @@ -136,6 +139,9 @@ UniValue getpeerinfo(const JSONRPCRequest& request) if (!(stats.addrLocal.empty())) obj.push_back(Pair("addrlocal", stats.addrLocal)); obj.push_back(Pair("services", strprintf("%016x", stats.nServices))); + if (!stats.verifiedProRegTxHash.IsNull()) { + obj.push_back(Pair("verified_proregtx_hash", stats.verifiedProRegTxHash.ToString())); + } obj.push_back(Pair("relaytxes", stats.fRelayTxes)); obj.push_back(Pair("lastsend", stats.nLastSend)); obj.push_back(Pair("lastrecv", stats.nLastRecv));