diff --git a/contrib/devtools/github-merge.py b/contrib/devtools/github-merge.py index f1b6a12fd058e..3fee39143dad7 100755 --- a/contrib/devtools/github-merge.py +++ b/contrib/devtools/github-merge.py @@ -78,24 +78,53 @@ def get_symlink_files(): ret.append(f.decode('utf-8').split("\t")[1]) return ret -def tree_sha512sum(): - files = sorted(subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', '--name-only', 'HEAD']).splitlines()) +def tree_sha512sum(commit='HEAD'): + # request metadata for entire tree, recursively + files = [] + blob_by_name = {} + for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines(): + name_sep = line.index(b'\t') + metadata = line[:name_sep].split() # perms, 'blob', blobid + assert(metadata[1] == b'blob') + name = line[name_sep+1:] + files.append(name) + blob_by_name[name] = metadata[2] + + files.sort() + # open connection to git-cat-file in batch mode to request data for all blobs + # this is much faster than launching it per file + p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE) overall = hashlib.sha512() for f in files: + blob = blob_by_name[f] + # request blob + p.stdin.write(blob + b'\n') + p.stdin.flush() + # read header: blob, "blob", size + reply = p.stdout.readline().split() + assert(reply[0] == blob and reply[1] == b'blob') + size = int(reply[2]) + # hash the blob data intern = hashlib.sha512() - fi = open(f, 'rb') - while True: - piece = fi.read(65536) - if piece: + ptr = 0 + while ptr < size: + bs = min(65536, size - ptr) + piece = p.stdout.read(bs) + if len(piece) == bs: intern.update(piece) else: - break - fi.close() + raise IOError('Premature EOF reading git cat-file output') + ptr += bs dig = intern.hexdigest() + assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data + # update overall hash with file hash overall.update(dig.encode("utf-8")) overall.update(" ".encode("utf-8")) overall.update(f) overall.update("\n".encode("utf-8")) + p.stdin.close() + if p.wait(): + raise IOError('Non-zero return value executing git cat-file') return overall.hexdigest() diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml index d04fdb2bf99ac..d85867e07afa2 100755 --- a/contrib/gitian-descriptors/gitian-win.yml +++ b/contrib/gitian-descriptors/gitian-win.yml @@ -179,6 +179,7 @@ script: | make ${MAKEOPTS} -C src check-security make deploy make install DESTDIR=${INSTALLPATH} + rename 's/-setup\.exe$/-setup-unsigned.exe/' *-setup.exe cp -f dashcore-*setup*.exe $OUTDIR/ cd installed mv ${DISTNAME}/bin/*.dll ${DISTNAME}/lib/ @@ -192,9 +193,11 @@ script: | cd ../../ rm -rf distsrc-${i} done - cd $OUTDIR - rename 's/-setup\.exe$/-setup-unsigned.exe/' *-setup.exe - find . -name "*-setup-unsigned.exe" | sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-win-unsigned.tar.gz + cp -rf contrib/windeploy $BUILD_DIR + cd $BUILD_DIR/windeploy + mkdir unsigned + cp $OUTDIR/dashcore-*setup-unsigned.exe unsigned/ + find . 
| sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-win-unsigned.tar.gz mv ${OUTDIR}/${DISTNAME}-x86_64-*-debug.zip ${OUTDIR}/${DISTNAME}-win64-debug.zip mv ${OUTDIR}/${DISTNAME}-i686-*-debug.zip ${OUTDIR}/${DISTNAME}-win32-debug.zip mv ${OUTDIR}/${DISTNAME}-x86_64-*.zip ${OUTDIR}/${DISTNAME}-win64.zip diff --git a/contrib/macdeploy/detached-sig-create.sh b/contrib/macdeploy/detached-sig-create.sh index 2d553d2666547..93b8de8e84fae 100755 --- a/contrib/macdeploy/detached-sig-create.sh +++ b/contrib/macdeploy/detached-sig-create.sh @@ -10,7 +10,7 @@ BUNDLE="${ROOTDIR}/Dash-Qt.app" CODESIGN=codesign TEMPDIR=sign.temp TEMPLIST=${TEMPDIR}/signatures.txt -OUT=signature.tar.gz +OUT=signature-osx.tar.gz OUTROOT=osx if [ ! -n "$1" ]; then diff --git a/contrib/verify-commits/trusted-sha512-root-commit b/contrib/verify-commits/trusted-sha512-root-commit index c28f50ff78454..7d41f90ad70ef 100644 --- a/contrib/verify-commits/trusted-sha512-root-commit +++ b/contrib/verify-commits/trusted-sha512-root-commit @@ -1 +1 @@ -f7ec7cfd38b543ba81ac7bed5b77f9a19739460b +309bf16257b2395ce502017be627186b749ee749 diff --git a/contrib/verify-commits/verify-commits.sh b/contrib/verify-commits/verify-commits.sh index 73a3cf0356b55..74b7f38375ac3 100755 --- a/contrib/verify-commits/verify-commits.sh +++ b/contrib/verify-commits/verify-commits.sh @@ -3,9 +3,6 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -# Not technically POSIX-compliant due to use of "local", but almost every -# shell anyone uses today supports it, so its probably fine - DIR=$(dirname "$0") [ "/${DIR#/}" != "$DIR" ] && DIR=$(dirname "$(pwd)/$0") @@ -16,14 +13,36 @@ VERIFIED_SHA512_ROOT=$(cat "${DIR}/trusted-sha512-root-commit") REVSIG_ALLOWED=$(cat "${DIR}/allow-revsig-commits") HAVE_FAILED=false -IS_SIGNED () { - if [ $1 = $VERIFIED_ROOT ]; then - return 0; + +HAVE_GNU_SHA512=1 +[ ! -x "$(which sha512sum)" ] && HAVE_GNU_SHA512=0 + +if [ x"$1" = "x" ]; then + CURRENT_COMMIT="HEAD" +else + CURRENT_COMMIT="$1" +fi + +if [ "${CURRENT_COMMIT#* }" != "$CURRENT_COMMIT" ]; then + echo "Commit must not contain spaces?" > /dev/stderr + exit 1 +fi + +VERIFY_TREE=0 +if [ x"$2" = "x--tree-checks" ]; then + VERIFY_TREE=1 +fi + +NO_SHA1=1 +PREV_COMMIT="" + +while true; do + if [ "$CURRENT_COMMIT" = $VERIFIED_ROOT ]; then + echo "There is a valid path from "$CURRENT_COMMIT" to $VERIFIED_ROOT where all commits are signed!" + exit 0; fi - VERIFY_TREE=$2 - NO_SHA1=$3 - if [ $1 = $VERIFIED_SHA512_ROOT ]; then + if [ "$CURRENT_COMMIT" = $VERIFIED_SHA512_ROOT ]; then if [ "$VERIFY_TREE" = "1" ]; then echo "All Tree-SHA512s matched up to $VERIFIED_SHA512_ROOT" > /dev/stderr fi @@ -37,91 +56,77 @@ IS_SIGNED () { export BITCOIN_VERIFY_COMMITS_ALLOW_SHA1=1 fi - if [ "${REVSIG_ALLOWED#*$1}" != "$REVSIG_ALLOWED" ]; then + if [ "${REVSIG_ALLOWED#*$CURRENT_COMMIT}" != "$REVSIG_ALLOWED" ]; then export BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=1 else export BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=0 fi - if ! git -c "gpg.program=${DIR}/gpg.sh" verify-commit $1 > /dev/null; then - return 1; + if ! git -c "gpg.program=${DIR}/gpg.sh" verify-commit "$CURRENT_COMMIT" > /dev/null; then + if [ "$PREV_COMMIT" != "" ]; then + echo "No parent of $PREV_COMMIT was signed with a trusted key!" 
> /dev/stderr + echo "Parents are:" > /dev/stderr + PARENTS=$(git show -s --format=format:%P $PREV_COMMIT) + for PARENT in $PARENTS; do + git show -s $PARENT > /dev/stderr + done + else + echo "$CURRENT_COMMIT was not signed with a trusted key!" > /dev/stderr + fi + exit 1 fi - # We set $4 to 1 on the first call, always verifying the top of the tree - if [ "$VERIFY_TREE" = 1 -o "$4" = "1" ]; then + # We always verify the top of the tree + if [ "$VERIFY_TREE" = 1 -o "$PREV_COMMIT" = "" ]; then IFS_CACHE="$IFS" IFS=' ' - for LINE in $(git ls-tree --full-tree -r $1); do + for LINE in $(git ls-tree --full-tree -r "$CURRENT_COMMIT"); do case "$LINE" in "12"*) echo "Repo contains symlinks" > /dev/stderr IFS="$IFS_CACHE" - return 1 + exit 1 ;; esac done IFS="$IFS_CACHE" FILE_HASHES="" - for FILE in $(git ls-tree --full-tree -r --name-only $1 | LC_ALL=C sort); do - HASH=$(git cat-file blob $1:"$FILE" | sha512sum | { read FIRST OTHER; echo $FIRST; } ) + for FILE in $(git ls-tree --full-tree -r --name-only "$CURRENT_COMMIT" | LC_ALL=C sort); do + if [ "$HAVE_GNU_SHA512" = 1 ]; then + HASH=$(git cat-file blob "$CURRENT_COMMIT":"$FILE" | sha512sum | { read FIRST OTHER; echo $FIRST; } ) + else + HASH=$(git cat-file blob "$CURRENT_COMMIT":"$FILE" | shasum -a 512 | { read FIRST OTHER; echo $FIRST; } ) + fi [ "$FILE_HASHES" != "" ] && FILE_HASHES="$FILE_HASHES"' ' FILE_HASHES="$FILE_HASHES$HASH $FILE" done + + if [ "$HAVE_GNU_SHA512" = 1 ]; then + TREE_HASH="$(echo "$FILE_HASHES" | sha512sum)" + else + TREE_HASH="$(echo "$FILE_HASHES" | shasum -a 512)" + fi HASH_MATCHES=0 - MSG="$(git show -s --format=format:%B $1 | tail -n1)" + MSG="$(git show -s --format=format:%B "$CURRENT_COMMIT" | tail -n1)" case "$MSG -" in - "Tree-SHA512: $(echo "$FILE_HASHES" | sha512sum)") + "Tree-SHA512: $TREE_HASH") HASH_MATCHES=1;; esac if [ "$HASH_MATCHES" = "0" ]; then - echo "Tree-SHA512 did not match for commit $1" > /dev/stderr - HAVE_FAILED=true - return 1 + echo "Tree-SHA512 did not match for commit $CURRENT_COMMIT" > /dev/stderr + exit 1 fi fi - local PARENTS - PARENTS=$(git show -s --format=format:%P $1) + PARENTS=$(git show -s --format=format:%P "$CURRENT_COMMIT") for PARENT in $PARENTS; do - if IS_SIGNED $PARENT $VERIFY_TREE $NO_SHA1 0; then - return 0; - fi + PREV_COMMIT="$CURRENT_COMMIT" + CURRENT_COMMIT="$PARENT" + break done - if ! "$HAVE_FAILED"; then - echo "No parent of $1 was signed with a trusted key!" > /dev/stderr - echo "Parents are:" > /dev/stderr - for PARENT in $PARENTS; do - git show -s $PARENT > /dev/stderr - done - HAVE_FAILED=true - fi - return 1; -} - -if [ x"$1" = "x" ]; then - TEST_COMMIT="HEAD" -else - TEST_COMMIT="$1" -fi - -DO_CHECKOUT_TEST=0 -if [ x"$2" = "x--tree-checks" ]; then - DO_CHECKOUT_TEST=1 -fi - -IS_SIGNED "$TEST_COMMIT" "$DO_CHECKOUT_TEST" 1 1 -RES=$? -if [ "$RES" = 1 ]; then - if ! "$HAVE_FAILED"; then - echo "$TEST_COMMIT was not signed with a trusted key!" - fi -else - echo "There is a valid path from $TEST_COMMIT to $VERIFIED_ROOT where all commits are signed!" -fi - -exit $RES +done diff --git a/contrib/windeploy/detached-sig-create.sh b/contrib/windeploy/detached-sig-create.sh new file mode 100755 index 0000000000000..bf4978d143014 --- /dev/null +++ b/contrib/windeploy/detached-sig-create.sh @@ -0,0 +1,34 @@ +#!/bin/sh +# Copyright (c) 2014-2015 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +if [ -z "$OSSLSIGNCODE" ]; then + OSSLSIGNCODE=osslsigncode +fi + +if [ ! -n "$1" ]; then + echo "usage: $0 " + echo "example: $0 -key codesign.key" + exit 1 +fi + +OUT=signature-win.tar.gz +SRCDIR=unsigned +WORKDIR=./.tmp +OUTDIR="${WORKDIR}/out" +OUTSUBDIR="${OUTDIR}/win" +TIMESERVER=http://timestamp.comodoca.com +CERTFILE="win-codesign.cert" + +mkdir -p "${OUTSUBDIR}" +basename -a `ls -1 "${SRCDIR}"/*-unsigned.exe` | while read UNSIGNED; do + echo Signing "${UNSIGNED}" + "${OSSLSIGNCODE}" sign -certs "${CERTFILE}" -t "${TIMESERVER}" -in "${SRCDIR}/${UNSIGNED}" -out "${WORKDIR}/${UNSIGNED}" "$@" + "${OSSLSIGNCODE}" extract-signature -pem -in "${WORKDIR}/${UNSIGNED}" -out "${OUTSUBDIR}/${UNSIGNED}.pem" && rm "${WORKDIR}/${UNSIGNED}" +done + +rm -f "${OUT}" +tar -C "${OUTDIR}" -czf "${OUT}" . +rm -rf "${WORKDIR}" +echo "Created ${OUT}" diff --git a/contrib/windeploy/win-codesign.cert b/contrib/windeploy/win-codesign.cert new file mode 100644 index 0000000000000..200b30a3f0d2f --- /dev/null +++ b/contrib/windeploy/win-codesign.cert @@ -0,0 +1,99 @@ +-----BEGIN CERTIFICATE----- +MIIFTTCCBDWgAwIBAgIRALlW05RLwG2hMQMX5d/o5J8wDQYJKoZIhvcNAQELBQAw +fTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxIzAhBgNV +BAMTGkNPTU9ETyBSU0EgQ29kZSBTaWduaW5nIENBMB4XDTE2MDIwMzAwMDAwMFoX +DTE5MDMwNTIzNTk1OVowgbUxCzAJBgNVBAYTAlVTMQ4wDAYDVQQRDAU5ODEwNDEL +MAkGA1UECAwCV0ExEDAOBgNVBAcMB1NlYXR0bGUxEDAOBgNVBAkMB1N0ZSAzMDAx +FzAVBgNVBAkMDjcxIENvbHVtYmlhIFN0MSUwIwYDVQQKDBxUaGUgQml0Y29pbiBG +b3VuZGF0aW9uLCBJbmMuMSUwIwYDVQQDDBxUaGUgQml0Y29pbiBGb3VuZGF0aW9u +LCBJbmMuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAw37Vrv9Gbku0 ++kuV0t89TuyxtAcmT7QE4GcwESKKjmkxfzD9a0qlhqk8GfQ+fw4DHNN+nLKNv7xB +bk6aS7J2v2DcXkOjrP99P9jqgTkp7MC04VtG3OqVRGB+gum0pptRovYZUQXIdkY7 +GJOok/NDagwKiiUe2V2meZ7UctsZNvYeilQdTgKIIhrMB9NowCOhT8ocVL4Ki55/ +l7hukJn3fueCM3fHTwY2/1gaGsOHoCkFRsD7vokjAVpiY+8rUgvHjb0gxgojiVGd +6a6/F5XJwKJacvUyN4Hfc2K5lRMQjTTmo4aWNWIa0iJ3TK9BHpdSLJBqerMPvmnM +kkapS+ZTNQIDAQABo4IBjTCCAYkwHwYDVR0jBBgwFoAUKZFg/4pN+uv5pmq4z/nm +S71JzhIwHQYDVR0OBBYEFONpQ+cV82URVe+V8G57377KxxexMA4GA1UdDwEB/wQE +AwIHgDAMBgNVHRMBAf8EAjAAMBMGA1UdJQQMMAoGCCsGAQUFBwMDMBEGCWCGSAGG ++EIBAQQEAwIEEDBGBgNVHSAEPzA9MDsGDCsGAQQBsjEBAgEDAjArMCkGCCsGAQUF +BwIBFh1odHRwczovL3NlY3VyZS5jb21vZG8ubmV0L0NQUzBDBgNVHR8EPDA6MDig +NqA0hjJodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9SU0FDb2RlU2lnbmlu +Z0NBLmNybDB0BggrBgEFBQcBAQRoMGYwPgYIKwYBBQUHMAKGMmh0dHA6Ly9jcnQu +Y29tb2RvY2EuY29tL0NPTU9ET1JTQUNvZGVTaWduaW5nQ0EuY3J0MCQGCCsGAQUF +BzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20wDQYJKoZIhvcNAQELBQADggEB +AGnBSi9K/9rgTAyKFKrfGWSfNOwAghmsnsvpZSQ7QyoGWBFKSgCs/70kErl18oHA +g7Y8loQB1yukZmJaCa3OvGud7smn45TCh0TMf4EpP20Wxf4rMQTxwAatasHL3+vi +I+Nl5bsRZ09kWjvayqLII5upjS/yq0JfpmyGl5k2C/fIpztq0iOLvqWlXcL4+51r +cMUAfX6E6EaZQm//ikp+w2+7MEXTKguOuV3gwsrTy0DsvkZl4YDgx/FA4ImzXopv +d+3KJPLvO+OSBqUD3JPwXHnuJqGAbLBFyyCa/feGUjLlR8cxcNWLWdp4qxtoIUPG +3wTsC9YgrglS0F7FKMXlNRY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIQLnyHzA6TSlL+lP0ct800rzANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTMwNTA5 +MDAwMDAwWhcNMjgwNTA4MjM1OTU5WjB9MQswCQYDVQQGEwJHQjEbMBkGA1UECBMS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEjMCEGA1UEAxMaQ09NT0RPIFJTQSBDb2RlIFNpZ25p +bmcgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCmmJBjd5E0f4rR 
+3elnMRHrzB79MR2zuWJXP5O8W+OfHiQyESdrvFGRp8+eniWzX4GoGA8dHiAwDvth +e4YJs+P9omidHCydv3Lj5HWg5TUjjsmK7hoMZMfYQqF7tVIDSzqwjiNLS2PgIpQ3 +e9V5kAoUGFEs5v7BEvAcP2FhCoyi3PbDMKrNKBh1SMF5WgjNu4xVjPfUdpA6M0ZQ +c5hc9IVKaw+A3V7Wvf2pL8Al9fl4141fEMJEVTyQPDFGy3CuB6kK46/BAW+QGiPi +XzjbxghdR7ODQfAuADcUuRKqeZJSzYcPe9hiKaR+ML0btYxytEjy4+gh+V5MYnmL +Agaff9ULAgMBAAGjggFRMIIBTTAfBgNVHSMEGDAWgBS7r34CPfqm8TyEjq3uOJjs +2TIy1DAdBgNVHQ4EFgQUKZFg/4pN+uv5pmq4z/nmS71JzhIwDgYDVR0PAQH/BAQD +AgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAwEwYDVR0lBAwwCgYIKwYBBQUHAwMwEQYD +VR0gBAowCDAGBgRVHSAAMEwGA1UdHwRFMEMwQaA/oD2GO2h0dHA6Ly9jcmwuY29t +b2RvY2EuY29tL0NPTU9ET1JTQUNlcnRpZmljYXRpb25BdXRob3JpdHkuY3JsMHEG +CCsGAQUFBwEBBGUwYzA7BggrBgEFBQcwAoYvaHR0cDovL2NydC5jb21vZG9jYS5j +b20vQ09NT0RPUlNBQWRkVHJ1c3RDQS5jcnQwJAYIKwYBBQUHMAGGGGh0dHA6Ly9v +Y3NwLmNvbW9kb2NhLmNvbTANBgkqhkiG9w0BAQwFAAOCAgEAAj8COcPu+Mo7id4M +bU2x8U6ST6/COCwEzMVjEasJY6+rotcCP8xvGcM91hoIlP8l2KmIpysQGuCbsQci +GlEcOtTh6Qm/5iR0rx57FjFuI+9UUS1SAuJ1CAVM8bdR4VEAxof2bO4QRHZXavHf +WGshqknUfDdOvf+2dVRAGDZXZxHNTwLk/vPa/HUX2+y392UJI0kfQ1eD6n4gd2HI +TfK7ZU2o94VFB696aSdlkClAi997OlE5jKgfcHmtbUIgos8MbAOMTM1zB5TnWo46 +BLqioXwfy2M6FafUFRunUkcyqfS/ZEfRqh9TTjIwc8Jvt3iCnVz/RrtrIh2IC/gb +qjSm/Iz13X9ljIwxVzHQNuxHoc/Li6jvHBhYxQZ3ykubUa9MCEp6j+KjUuKOjswm +5LLY5TjCqO3GgZw1a6lYYUoKl7RLQrZVnb6Z53BtWfhtKgx/GWBfDJqIbDCsUgmQ +Fhv/K53b0CDKieoofjKOGd97SDMe12X4rsn4gxSTdn1k0I7OvjV9/3IxTZ+evR5s +L6iPDAZQ+4wns3bJ9ObXwzTijIchhmH+v1V04SF3AwpobLvkyanmz1kl63zsRQ55 +ZmjoIs2475iFTZYRPAmK0H+8KCgT+2rKVI2SXM3CZZgGns5IW9S1N5NGQXwH3c/6 +Q++6Z2H/fUnguzB9XIDj5hY5S6c= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb 
+QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- diff --git a/doc/release-process.md b/doc/release-process.md index c199086d28d3d..255c2c05a37f7 100644 --- a/doc/release-process.md +++ b/doc/release-process.md @@ -169,7 +169,38 @@ Commit your signature to gitian.sigs: git push # Assuming you can push to the gitian.sigs tree popd -Wait for Windows/OS X detached signatures: +Codesigner only: Create Windows/OS X detached signatures: +- Only one person handles codesigning. Everyone else should skip to the next step. +- Only once the Windows/OS X builds each have 3 matching signatures may they be signed with their respective release keys. + +Codesigner only: Sign the osx binary: + + transfer dashcore-osx-unsigned.tar.gz to osx for signing + tar xf dashcore-osx-unsigned.tar.gz + ./detached-sig-create.sh -s "Key ID" + Enter the keychain password and authorize the signature + Move signature-osx.tar.gz back to the gitian host + +Codesigner only: Sign the windows binaries: + + tar xf dashcore-win-unsigned.tar.gz + ./detached-sig-create.sh -key /path/to/codesign.key + Enter the passphrase for the key when prompted + signature-win.tar.gz will be created + +Codesigner only: Commit the detached codesign payloads: + + cd ~/dashcore-detached-sigs + checkout the appropriate branch for this release series + rm -rf * + tar xf signature-osx.tar.gz + tar xf signature-win.tar.gz + git add -a + git commit -m "point to ${VERSION}" + git tag -s v${VERSION} HEAD + git push the current branch and new tag + +Non-codesigners: wait for Windows/OS X detached signatures: - Once the Windows/OS X builds each have 3 matching signatures, they will be signed with their respective release keys. - Detached signatures will then be committed to the [dash-detached-sigs](https://github.com/dashpay/dash-detached-sigs) repository, which can be combined with the unsigned apps to create signed binaries. 
diff --git a/qa/rpc-tests/abandonconflict.py b/qa/rpc-tests/abandonconflict.py index b32d4e2ce0830..7c581ea85da60 100755 --- a/qa/rpc-tests/abandonconflict.py +++ b/qa/rpc-tests/abandonconflict.py @@ -23,8 +23,8 @@ def __init__(self): def setup_network(self): self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"])) - self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-logtimemicros"])) + self.nodes.append(start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.00001"])) + self.nodes.append(start_node(1, self.options.tmpdir)) connect_nodes(self.nodes[0], 1) def run_test(self): @@ -82,7 +82,7 @@ def run_test(self): # TODO: redo with eviction # Note had to make sure tx did not have AllowFree priority stop_node(self.nodes[0],0) - self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"]) + self.nodes[0]=start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.0001"]) # Verify txs no longer in mempool assert_equal(len(self.nodes[0].getrawmempool()), 0) @@ -108,7 +108,7 @@ def run_test(self): # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned stop_node(self.nodes[0],0) - self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"]) + self.nodes[0]=start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.00001"]) assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(self.nodes[0].getbalance(), balance) @@ -128,7 +128,7 @@ def run_test(self): # Remove using high relay fee again stop_node(self.nodes[0],0) - self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"]) + self.nodes[0]=start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.0001"]) assert_equal(len(self.nodes[0].getrawmempool()), 0) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("24.9996")) @@ -159,9 +159,9 @@ def run_test(self): self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) newbalance = self.nodes[0].getbalance() #assert_equal(newbalance, balance - Decimal("10")) - print("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer") - print("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315") - print(str(balance) + " -> " + str(newbalance) + " ?") + self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer") + self.log.info("conflicted has not resumed causing its inputs to be seen as spent. 
See Issue #7315") + self.log.info(str(balance) + " -> " + str(newbalance) + " ?") if __name__ == '__main__': AbandonConflictTest().main() diff --git a/qa/rpc-tests/addressindex.py b/qa/rpc-tests/addressindex.py index d7c2302f696b3..f3ea0f53975aa 100755 --- a/qa/rpc-tests/addressindex.py +++ b/qa/rpc-tests/addressindex.py @@ -23,11 +23,11 @@ def __init__(self): def setup_network(self): self.nodes = [] # Nodes 0/1 are "wallet" nodes - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-relaypriority=0"])) - self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-addressindex"])) + self.nodes.append(start_node(0, self.options.tmpdir, ["-relaypriority=0"])) + self.nodes.append(start_node(1, self.options.tmpdir, ["-addressindex"])) # Nodes 2/3 are used for testing - self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-addressindex", "-relaypriority=0"])) - self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-addressindex"])) + self.nodes.append(start_node(2, self.options.tmpdir, ["-addressindex", "-relaypriority=0"])) + self.nodes.append(start_node(3, self.options.tmpdir, ["-addressindex"])) connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[0], 2) connect_nodes(self.nodes[0], 3) @@ -36,7 +36,7 @@ def setup_network(self): self.sync_all() def run_test(self): - print("Mining blocks...") + self.log.info("Mining blocks...") self.nodes[0].generate(105) self.sync_all() @@ -50,7 +50,7 @@ def run_test(self): assert_equal(balance0["balance"], 0) # Check p2pkh and p2sh address indexes - print("Testing p2pkh and p2sh address index...") + self.log.info("Testing p2pkh and p2sh address index...") txid0 = self.nodes[0].sendtoaddress("yMNJePdcKvXtWWQnFYHNeJ5u8TF2v1dfK4", 10) self.nodes[0].generate(1) @@ -85,7 +85,7 @@ def run_test(self): assert_equal(txidsb[2], txidb2) # Check that limiting by height works - print("Testing querying txids by range of block heights..") + self.log.info("Testing querying txids by range of block heights..") height_txids = self.nodes[1].getaddresstxids({ "addresses": ["93bVhahvUKmQu8gu9g3QnPPa2cxFK98pMB"], "start": 105, @@ -110,7 +110,7 @@ def run_test(self): assert_equal(balance0["balance"], 45 * 100000000) # Check that outputs with the same address will only return one txid - print("Testing for txid uniqueness...") + self.log.info("Testing for txid uniqueness...") addressHash = binascii.unhexlify("FE30B718DCF0BF8A2A686BF1820C073F8B2C3B37") scriptPubKey = CScript([OP_HASH160, addressHash, OP_EQUAL]) unspent = self.nodes[0].listunspent() @@ -130,12 +130,12 @@ def run_test(self): assert_equal(txidsmany[3], sent_txid) # Check that balances are correct - print("Testing balances...") + self.log.info("Testing balances...") balance0 = self.nodes[1].getaddressbalance("93bVhahvUKmQu8gu9g3QnPPa2cxFK98pMB") assert_equal(balance0["balance"], 45 * 100000000 + 21) # Check that balances are correct after spending - print("Testing balances after spending...") + self.log.info("Testing balances after spending...") privkey2 = "cU4zhap7nPJAWeMFu4j6jLrfPmqakDAzy8zn8Fhb3oEevdm4e5Lc" address2 = "yeMpGzMj3rhtnz48XsfpB8itPHhHtgxLc3" addressHash2 = binascii.unhexlify("C5E4FB9171C22409809A3E8047A29C83886E325D") @@ -188,13 +188,13 @@ def run_test(self): assert_equal(len(deltas), 1) # Check that unspent outputs can be queried - print("Testing utxos...") + self.log.info("Testing utxos...") utxos = self.nodes[1].getaddressutxos({"addresses": [address2]}) assert_equal(len(utxos), 1) assert_equal(utxos[0]["satoshis"], change_amount) # Check that indexes will 
be updated with a reorg - print("Testing reorg...") + self.log.info("Testing reorg...") best_hash = self.nodes[0].getbestblockhash() self.nodes[0].invalidateblock(best_hash) @@ -229,7 +229,7 @@ def run_test(self): assert_equal(utxos3[2]["height"], 265) # Check mempool indexing - print("Testing mempool indexing...") + self.log.info("Testing mempool indexing...") privKey3 = "cRyrMvvqi1dmpiCmjmmATqjAwo6Wu7QTjKu1ABMYW5aFG4VXW99K" address3 = "yWB15aAdpeKuSaQHFVJpBDPbNSLZJSnDLA" @@ -327,7 +327,7 @@ def run_test(self): mempool_deltas = self.nodes[2].getaddressmempool({"addresses": [address1]}) assert_equal(len(mempool_deltas), 2) - print("Passed\n") + self.log.info("Passed") if __name__ == '__main__': diff --git a/qa/rpc-tests/assumevalid.py b/qa/rpc-tests/assumevalid.py index b8dafff161a91..c60c8e6d1a914 100755 --- a/qa/rpc-tests/assumevalid.py +++ b/qa/rpc-tests/assumevalid.py @@ -73,7 +73,7 @@ def setup_network(self): # we need to pre-mine a block with an invalid transaction # signature so we can pass in the block hash as assumevalid. self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"])) + self.nodes.append(start_node(0, self.options.tmpdir)) def run_test(self): @@ -146,14 +146,14 @@ def run_test(self): # Start node1 and node2 with assumevalid so they accept a block with a bad signature. self.nodes.append(start_node(1, self.options.tmpdir, - ["-debug", "-assumevalid=" + hex(block102.sha256)])) + ["-assumevalid=" + hex(block102.sha256)])) node1 = BaseNode() # connects to node1 connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1)) node1.add_connection(connections[1]) node1.wait_for_verack() self.nodes.append(start_node(2, self.options.tmpdir, - ["-debug", "-assumevalid=" + hex(block102.sha256)])) + ["-assumevalid=" + hex(block102.sha256)])) node2 = BaseNode() # connects to node2 connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2)) node2.add_connection(connections[2]) diff --git a/qa/rpc-tests/bip65-cltv-p2p.py b/qa/rpc-tests/bip65-cltv-p2p.py index cdd4977d1d71d..107e8245f83ea 100755 --- a/qa/rpc-tests/bip65-cltv-p2p.py +++ b/qa/rpc-tests/bip65-cltv-p2p.py @@ -42,7 +42,7 @@ def __init__(self): def setup_network(self): # Must set the blockversion for this test self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=3']], + extra_args=[['-whitelist=127.0.0.1', '-blockversion=3']], binary=[self.options.testbinary]) def run_test(self): diff --git a/qa/rpc-tests/bip68-112-113-p2p.py b/qa/rpc-tests/bip68-112-113-p2p.py index e90b8a0b5023d..27eaebcbc53a5 100755 --- a/qa/rpc-tests/bip68-112-113-p2p.py +++ b/qa/rpc-tests/bip68-112-113-p2p.py @@ -100,7 +100,7 @@ def setup_network(self): # Must set the blockversion for this test # Must also set '-maxtipage=600100' to allow syncing from very old blocks self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=4', '-maxtipage=600100']], + extra_args=[['-whitelist=127.0.0.1', '-blockversion=4', '-maxtipage=600100']], binary=[self.options.testbinary]) def run_test(self): diff --git a/qa/rpc-tests/bip68-sequence.py b/qa/rpc-tests/bip68-sequence.py index 86c1622dce88a..fccec291272dd 100755 --- a/qa/rpc-tests/bip68-sequence.py +++ b/qa/rpc-tests/bip68-sequence.py @@ -24,8 +24,8 @@ def __init__(self): def setup_network(self): self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-blockprioritysize=0"])) - 
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-blockprioritysize=0", "-acceptnonstdtxn=0"])) + self.nodes.append(start_node(0, self.options.tmpdir, ["-blockprioritysize=0"])) + self.nodes.append(start_node(1, self.options.tmpdir, ["-blockprioritysize=0", "-acceptnonstdtxn=0"])) self.is_network_split = False self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] connect_nodes(self.nodes[0], 1) @@ -34,26 +34,26 @@ def run_test(self): # Generate some coins self.nodes[0].generate(110) - print("Running test disable flag") + self.log.info("Running test disable flag") self.test_disable_flag() - print("Running test sequence-lock-confirmed-inputs") + self.log.info("Running test sequence-lock-confirmed-inputs") self.test_sequence_lock_confirmed_inputs() - print("Running test sequence-lock-unconfirmed-inputs") + self.log.info("Running test sequence-lock-unconfirmed-inputs") self.test_sequence_lock_unconfirmed_inputs() - print("Running test BIP68 not consensus before versionbits activation") + self.log.info("Running test BIP68 not consensus before versionbits activation") self.test_bip68_not_consensus() - print("Activating BIP68 (and 112/113)") + self.log.info("Activating BIP68 (and 112/113)") self.activateCSV() - print("Verifying nVersion=2 transactions are standard.") - print("Note that with current versions of Dash Core software, nVersion=2 transactions are always standard (independent of BIP68 activation status).") + self.log.info("Verifying nVersion=2 transactions are standard.") + self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).") self.test_version2_relay() - print("Passed\n") + self.log.info("Passed") # Test that BIP68 is not in effect if tx version is 1, or if # the first sequence bit is set. 
diff --git a/qa/rpc-tests/bip9-softforks.py b/qa/rpc-tests/bip9-softforks.py index 5f86117c40604..e21a396f2e9ed 100755 --- a/qa/rpc-tests/bip9-softforks.py +++ b/qa/rpc-tests/bip9-softforks.py @@ -35,7 +35,7 @@ def __init__(self): def setup_network(self): self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - extra_args=[['-debug', '-whitelist=127.0.0.1']], + extra_args=[['-whitelist=127.0.0.1']], binary=[self.options.testbinary]) def run_test(self): diff --git a/qa/rpc-tests/bipdersig-p2p.py b/qa/rpc-tests/bipdersig-p2p.py index d8abedd954349..5e7b1331b4c1d 100755 --- a/qa/rpc-tests/bipdersig-p2p.py +++ b/qa/rpc-tests/bipdersig-p2p.py @@ -49,7 +49,7 @@ def __init__(self): def setup_network(self): # Must set the blockversion for this test self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=2']], + extra_args=[['-whitelist=127.0.0.1', '-blockversion=2']], binary=[self.options.testbinary]) def run_test(self): diff --git a/qa/rpc-tests/blockchain.py b/qa/rpc-tests/blockchain.py index 93106919f7acf..4896468e03bb8 100755 --- a/qa/rpc-tests/blockchain.py +++ b/qa/rpc-tests/blockchain.py @@ -56,7 +56,7 @@ def _test_gettxoutsetinfo(self): assert_equal(len(res['bestblock']), 64) assert_equal(len(res['hash_serialized_2']), 64) - print("Test that gettxoutsetinfo() works for blockchain with just the genesis block") + self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block") b1hash = node.getblockhash(1) node.invalidateblock(b1hash) @@ -68,7 +68,7 @@ def _test_gettxoutsetinfo(self): assert_equal(res2['bestblock'], node.getblockhash(0)) assert_equal(len(res2['hash_serialized_2']), 64) - print("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block") + self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block") node.reconsiderblock(b1hash) res3 = node.gettxoutsetinfo() diff --git a/qa/rpc-tests/dip3-deterministicmns.py b/qa/rpc-tests/dip3-deterministicmns.py index 66a4eb0641414..93229c0debacf 100755 --- a/qa/rpc-tests/dip3-deterministicmns.py +++ b/qa/rpc-tests/dip3-deterministicmns.py @@ -32,7 +32,7 @@ def setup_network(self): self.is_network_split = False def start_controller_node(self, extra_args=None): - print("starting controller node") + self.log.info("starting controller node") if self.nodes is None: self.nodes = [None] args = self.extra_args @@ -44,7 +44,7 @@ def start_controller_node(self, extra_args=None): connect_nodes_bi(self.nodes, 0, i) def stop_controller_node(self): - print("stopping controller node") + self.log.info("stopping controller node") stop_node(self.nodes[0], 0) def restart_controller_node(self): @@ -52,19 +52,19 @@ def restart_controller_node(self): self.start_controller_node() def run_test(self): - print("funding controller node") + self.log.info("funding controller node") while self.nodes[0].getbalance() < (self.num_initial_mn + 3) * 1000: self.nodes[0].generate(1) # generate enough for collaterals - print("controller node has {} dash".format(self.nodes[0].getbalance())) + self.log.info("controller node has {} dash".format(self.nodes[0].getbalance())) # Make sure we're below block 135 (which activates dip3) - print("testing rejection of ProTx before dip3 activation") + self.log.info("testing rejection of ProTx before dip3 activation") assert(self.nodes[0].getblockchaininfo()['blocks'] < 135) mns = [] # prepare mn which should still be accepted later when dip3 activates - print("creating 
collateral for mn-before-dip3") + self.log.info("creating collateral for mn-before-dip3") before_dip3_mn = self.prepare_mn(self.nodes[0], 1, 'mn-before-dip3') self.create_mn_collateral(self.nodes[0], before_dip3_mn) mns.append(before_dip3_mn) @@ -73,11 +73,11 @@ def run_test(self): while self.nodes[0].getblockcount() < 150: self.nodes[0].generate(1) - print("mining final block for DIP3 activation") + self.log.info("mining final block for DIP3 activation") self.nodes[0].generate(1) # We have hundreds of blocks to sync here, give it more time - print("syncing blocks for all nodes") + self.log.info("syncing blocks for all nodes") sync_blocks(self.nodes, timeout=120) # DIP3 has activated here @@ -85,7 +85,7 @@ def run_test(self): self.register_mn(self.nodes[0], before_dip3_mn) self.start_mn(before_dip3_mn) - print("registering MNs") + self.log.info("registering MNs") for i in range(0, self.num_initial_mn): mn = self.prepare_mn(self.nodes[0], i + 2, "mn-%d" % i) mns.append(mn) @@ -98,12 +98,12 @@ def run_test(self): # let a few of the protx MNs refer to the existing collaterals fund = (i % 2) == 0 if fund: - print("register_fund %s" % mn.alias) + self.log.info("register_fund %s" % mn.alias) self.register_fund_mn(self.nodes[0], mn) else: - print("create_collateral %s" % mn.alias) + self.log.info("create_collateral %s" % mn.alias) self.create_mn_collateral(self.nodes[0], mn) - print("register %s" % mn.alias) + self.log.info("register %s" % mn.alias) self.register_mn(self.nodes[0], mn) self.nodes[0].generate(1) @@ -114,10 +114,10 @@ def run_test(self): self.sync_all() self.assert_mnlists(mns) - print("testing instant send") + self.log.info("testing instant send") self.test_instantsend(10, 3) - print("test that MNs disappear from the list when the ProTx collateral is spent") + self.log.info("test that MNs disappear from the list when the ProTx collateral is spent") spend_mns_count = 3 mns_tmp = [] + mns dummy_txins = [] @@ -129,30 +129,30 @@ def run_test(self): mns_tmp.remove(mns[i]) self.assert_mnlists(mns_tmp) - print("test that reverting the blockchain on a single node results in the mnlist to be reverted as well") + self.log.info("test that reverting the blockchain on a single node results in the mnlist to be reverted as well") for i in range(spend_mns_count): self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) mns_tmp.append(mns[spend_mns_count - 1 - i]) self.assert_mnlist(self.nodes[0], mns_tmp) - print("cause a reorg with a double spend and check that mnlists are still correct on all nodes") + self.log.info("cause a reorg with a double spend and check that mnlists are still correct on all nodes") self.mine_double_spend(self.nodes[0], dummy_txins, self.nodes[0].getnewaddress(), use_mnmerkleroot_from_tip=True) self.nodes[0].generate(spend_mns_count) self.sync_all() self.assert_mnlists(mns_tmp) - print("test mn payment enforcement with deterministic MNs") + self.log.info("test mn payment enforcement with deterministic MNs") for i in range(20): node = self.nodes[i % len(self.nodes)] self.test_invalid_mn_payment(node) node.generate(1) self.sync_all() - print("testing ProUpServTx") + self.log.info("testing ProUpServTx") for mn in mns: self.test_protx_update_service(mn) - print("testing P2SH/multisig for payee addresses") + self.log.info("testing P2SH/multisig for payee addresses") multisig = self.nodes[0].createmultisig(1, [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()])['address'] self.update_mn_payee(mns[0], multisig) found_multisig_payee = False @@ -171,7 +171,7 @@ 
def run_test(self): found_multisig_payee = True assert(found_multisig_payee) - print("testing reusing of collaterals for replaced MNs") + self.log.info("testing reusing of collaterals for replaced MNs") for i in range(0, 5): mn = mns[i] # a few of these will actually refer to old ProRegTx internal collaterals, @@ -186,15 +186,15 @@ def run_test(self): self.nodes[0].generate(1) self.sync_all() self.assert_mnlists(mns) - print("restarting MN %s" % new_mn.alias) + self.log.info("restarting MN %s" % new_mn.alias) self.stop_node(new_mn.idx) self.start_mn(new_mn) self.sync_all() - print("testing instant send with replaced MNs") + self.log.info("testing instant send with replaced MNs") self.test_instantsend(10, 3, timeout=20) - print("testing simple PoSe") + self.log.info("testing simple PoSe") self.assert_mnlists(mns) self.nodes[0].spork('SPORK_17_QUORUM_DKG_ENABLED', 0) self.wait_for_sporks() @@ -408,8 +408,8 @@ def assert_mnlist(self, node, mns): expected = [] for mn in mns: expected.append('%s-%d' % (mn.collateral_txid, mn.collateral_vout)) - print('mnlist: ' + str(node.masternode('list', 'status'))) - print('expected: ' + str(expected)) + self.log.error('mnlist: ' + str(node.masternode('list', 'status'))) + self.log.error('expected: ' + str(expected)) raise AssertionError("mnlists does not match provided mns") def wait_for_sporks(self, timeout=30): diff --git a/qa/rpc-tests/fundrawtransaction-hd.py b/qa/rpc-tests/fundrawtransaction-hd.py index b00d45157b2d8..6e0c7645a60e4 100755 --- a/qa/rpc-tests/fundrawtransaction-hd.py +++ b/qa/rpc-tests/fundrawtransaction-hd.py @@ -26,7 +26,7 @@ def setup_network(self, split=False): self.sync_all() def run_test(self): - print("Mining blocks...") + self.log.info("Mining blocks...") min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee'] # This test is not meant to test fee estimation and we'd like diff --git a/qa/rpc-tests/fundrawtransaction.py b/qa/rpc-tests/fundrawtransaction.py index 25b352316863c..18eb5d02baa67 100755 --- a/qa/rpc-tests/fundrawtransaction.py +++ b/qa/rpc-tests/fundrawtransaction.py @@ -34,8 +34,6 @@ def setup_network(self, split=False): self.sync_all() def run_test(self): - print("Mining blocks...") - min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee'] # This test is not meant to test fee estimation and we'd like # to be sure all txs are sent at a consistent desired feerate diff --git a/qa/rpc-tests/getblocktemplate_longpoll.py b/qa/rpc-tests/getblocktemplate_longpoll.py index 363d3827a1548..8f25197d78025 100755 --- a/qa/rpc-tests/getblocktemplate_longpoll.py +++ b/qa/rpc-tests/getblocktemplate_longpoll.py @@ -29,7 +29,7 @@ def __init__(self): self.setup_clean_chain = False def run_test(self): - print("Warning: this test will take about 70 seconds in the best case. Be patient.") + self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.") wait_to_sync(self.nodes[0]) self.nodes[0].generate(10) templat = self.nodes[0].getblocktemplate() @@ -62,7 +62,9 @@ def run_test(self): thr = LongpollThread(self.nodes[0]) thr.start() # generate a random transaction and submit it - (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20) + min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"] + # min_relay_fee is fee per 1000 bytes, which should be more than enough. 
+ (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20) # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned thr.join(60 + 20) assert(not thr.is_alive()) diff --git a/qa/rpc-tests/import-rescan.py b/qa/rpc-tests/import-rescan.py index 45954b011fb8b..fb5e1d6c702be 100755 --- a/qa/rpc-tests/import-rescan.py +++ b/qa/rpc-tests/import-rescan.py @@ -7,11 +7,11 @@ Test rescan behavior of importaddress, importpubkey, importprivkey, and importmulti RPCs with different types of keys and rescan options. -In the first part of the test, node 1 creates an address for each type of -import RPC call and node 0 sends BTC to it. Then other nodes import the -addresses, and the test makes listtransactions and getbalance calls to confirm -that the importing node either did or did not execute rescans picking up the -send transactions. +In the first part of the test, node 0 creates an address for each type of +import RPC call and sends BTC to it. Then other nodes import the addresses, +and the test makes listtransactions and getbalance calls to confirm that the +importing node either did or did not execute rescans picking up the send +transactions. In the second part of the test, node 0 sends more BTC to each address, and the test makes more listtransactions and getbalance calls to confirm that the @@ -117,7 +117,7 @@ def __init__(self): self.num_nodes = 2 + len(IMPORT_NODES) def setup_network(self): - extra_args = [["-debug=1"] for _ in range(self.num_nodes)] + extra_args = [[] for _ in range(self.num_nodes)] for i, import_node in enumerate(IMPORT_NODES, 2): if import_node.prune: # txindex is enabled by default in Dash and needs to be disabled for import-rescan.py diff --git a/qa/rpc-tests/importmulti.py b/qa/rpc-tests/importmulti.py index 015336effef3d..298b6e9b86ae3 100755 --- a/qa/rpc-tests/importmulti.py +++ b/qa/rpc-tests/importmulti.py @@ -17,7 +17,7 @@ def setup_network(self, split=False): self.is_network_split=False def run_test (self): - print ("Mining blocks...") + self.log.info("Mining blocks...") self.nodes[0].generate(1) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] @@ -48,7 +48,7 @@ def run_test (self): # RPC importmulti ----------------------------------------------- # Bitcoin Address - print("Should import an address") + self.log.info("Should import an address") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { @@ -64,7 +64,7 @@ def run_test (self): watchonly_address = address['address'] watchonly_timestamp = timestamp - print("Should not import an invalid address") + self.log.info("Should not import an invalid address") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": "not valid address", @@ -76,7 +76,7 @@ def run_test (self): assert_equal(result[0]['error']['message'], 'Invalid address') # ScriptPubKey + internal - print("Should import a scriptPubKey with internal flag") + self.log.info("Should import a scriptPubKey with internal flag") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], @@ -90,7 +90,7 @@ def run_test (self): assert_equal(address_assert['timestamp'], timestamp) # ScriptPubKey + !internal - print("Should not import a scriptPubKey without internal flag") + self.log.info("Should not import a scriptPubKey without internal flag") 
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], @@ -106,7 +106,7 @@ def run_test (self): # Address + Public key + !Internal - print("Should import an address with public key") + self.log.info("Should import an address with public key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { @@ -123,7 +123,7 @@ def run_test (self): # ScriptPubKey + Public key + internal - print("Should import a scriptPubKey with internal and with public key") + self.log.info("Should import a scriptPubKey with internal and with public key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], @@ -139,7 +139,7 @@ def run_test (self): assert_equal(address_assert['timestamp'], timestamp) # ScriptPubKey + Public key + !internal - print("Should not import a scriptPubKey without internal and with public key") + self.log.info("Should not import a scriptPubKey without internal and with public key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], @@ -156,7 +156,7 @@ def run_test (self): assert_equal('timestamp' in address_assert, False) # Address + Private key + !watchonly - print("Should import an address with private key") + self.log.info("Should import an address with private key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { @@ -172,7 +172,7 @@ def run_test (self): assert_equal(address_assert['timestamp'], timestamp) # Address + Private key + watchonly - print("Should not import an address with private key and with watchonly") + self.log.info("Should not import an address with private key and with watchonly") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { @@ -191,7 +191,7 @@ def run_test (self): assert_equal('timestamp' in address_assert, False) # ScriptPubKey + Private key + internal - print("Should import a scriptPubKey with internal and with private key") + self.log.info("Should import a scriptPubKey with internal and with private key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], @@ -206,7 +206,7 @@ def run_test (self): assert_equal(address_assert['timestamp'], timestamp) # ScriptPubKey + Private key + !internal - print("Should not import a scriptPubKey without internal and with private key") + self.log.info("Should not import a scriptPubKey without internal and with private key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], @@ -233,7 +233,7 @@ def run_test (self): timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] transaction = self.nodes[1].gettransaction(transactionid) - print("Should import a p2sh") + self.log.info("Should import a p2sh") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] @@ -261,7 +261,7 @@ def run_test (self): timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] transaction = self.nodes[1].gettransaction(transactionid) - print("Should import a p2sh with respective redeem script") + 
self.log.info("Should import a p2sh with respective redeem script") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] @@ -289,7 +289,7 @@ def run_test (self): timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] transaction = self.nodes[1].gettransaction(transactionid) - print("Should import a p2sh with respective redeem script and private keys") + self.log.info("Should import a p2sh with respective redeem script and private keys") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] @@ -316,7 +316,7 @@ def run_test (self): self.nodes[1].generate(1) transaction = self.nodes[1].gettransaction(transactionid) - print("Should import a p2sh with respective redeem script and private keys") + self.log.info("Should import a p2sh with respective redeem script and private keys") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] @@ -332,7 +332,7 @@ def run_test (self): # Address + Public key + !Internal + Wrong pubkey - print("Should not import an address with a wrong public key") + self.log.info("Should not import an address with a wrong public key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ @@ -352,7 +352,7 @@ def run_test (self): # ScriptPubKey + Public key + internal + Wrong pubkey - print("Should not import a scriptPubKey with internal and with a wrong public key") + self.log.info("Should not import a scriptPubKey with internal and with a wrong public key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) request = [{ @@ -372,7 +372,7 @@ def run_test (self): # Address + Private key + !watchonly + Wrong private key - print("Should not import an address with a wrong private key") + self.log.info("Should not import an address with a wrong private key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ @@ -392,7 +392,7 @@ def run_test (self): # ScriptPubKey + Private key + internal + Wrong private key - print("Should not import a scriptPubKey with internal and with a wrong private key") + self.log.info("Should not import a scriptPubKey with internal and with a wrong private key") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ @@ -418,7 +418,7 @@ def run_test (self): assert_equal(address_assert['timestamp'], watchonly_timestamp); # Bad or missing timestamps - print("Should throw on invalid or missing timestamp values") + self.log.info("Should throw on invalid or missing timestamp values") assert_raises_message(JSONRPCException, 'Missing required timestamp field for key', self.nodes[1].importmulti, [{ "scriptPubKey": address['scriptPubKey'], diff --git a/qa/rpc-tests/importprunedfunds.py b/qa/rpc-tests/importprunedfunds.py index ecb67f7220b8f..d9147da10b62f 100755 --- a/qa/rpc-tests/importprunedfunds.py +++ b/qa/rpc-tests/importprunedfunds.py @@ -21,7 +21,7 @@ def setup_network(self, split=False): self.sync_all() def run_test(self): - print("Mining blocks...") + self.log.info("Mining blocks...") self.nodes[0].generate(101) self.sync_all() diff --git 
a/qa/rpc-tests/invalidateblock.py b/qa/rpc-tests/invalidateblock.py index 92e65927d571c..8c80b640035a1 100755 --- a/qa/rpc-tests/invalidateblock.py +++ b/qa/rpc-tests/invalidateblock.py @@ -18,53 +18,51 @@ def __init__(self): def setup_network(self): self.nodes = [] self.is_network_split = False - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"])) - self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"])) - self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"])) + self.nodes.append(start_node(0, self.options.tmpdir)) + self.nodes.append(start_node(1, self.options.tmpdir)) + self.nodes.append(start_node(2, self.options.tmpdir)) def run_test(self): - print("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:") - print("Mine 4 blocks on Node 0") + self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:") + self.log.info("Mine 4 blocks on Node 0") self.nodes[0].generate(4) assert(self.nodes[0].getblockcount() == 4) besthash = self.nodes[0].getbestblockhash() - print("Mine competing 6 blocks on Node 1") + self.log.info("Mine competing 6 blocks on Node 1") self.nodes[1].generate(6) assert(self.nodes[1].getblockcount() == 6) - print("Connect nodes to force a reorg") + self.log.info("Connect nodes to force a reorg") connect_nodes_bi(self.nodes,0,1) sync_blocks(self.nodes[0:2]) assert(self.nodes[0].getblockcount() == 6) badhash = self.nodes[1].getblockhash(2) - print("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain") + self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain") self.nodes[0].invalidateblock(badhash) newheight = self.nodes[0].getblockcount() newhash = self.nodes[0].getbestblockhash() if (newheight != 4 or newhash != besthash): raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight)) - print("\nMake sure we won't reorg to a lower work chain:") + self.log.info("Make sure we won't reorg to a lower work chain:") connect_nodes_bi(self.nodes,1,2) - print("Sync node 2 to node 1 so both have 6 blocks") + self.log.info("Sync node 2 to node 1 so both have 6 blocks") sync_blocks(self.nodes[1:3]) assert(self.nodes[2].getblockcount() == 6) - print("Invalidate block 5 on node 1 so its tip is now at 4") + self.log.info("Invalidate block 5 on node 1 so its tip is now at 4") self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5)) assert(self.nodes[1].getblockcount() == 4) - print("Invalidate block 3 on node 2, so its tip is now 2") + self.log.info("Invalidate block 3 on node 2, so its tip is now 2") self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3)) assert(self.nodes[2].getblockcount() == 2) - print("..and then mine a block") + self.log.info("..and then mine a block") self.nodes[2].generate(1) - print("Verify all nodes are at the right height") + self.log.info("Verify all nodes are at the right height") time.sleep(5) - for i in range(3): - print(i,self.nodes[i].getblockcount()) - assert(self.nodes[2].getblockcount() == 3) - assert(self.nodes[0].getblockcount() == 4) + assert_equal(self.nodes[2].getblockcount(), 3) + assert_equal(self.nodes[0].getblockcount(), 4) node1height = self.nodes[1].getblockcount() if node1height < 4: raise AssertionError("Node 1 reorged to a lower height: %d"%node1height) diff --git a/qa/rpc-tests/listsinceblock.py b/qa/rpc-tests/listsinceblock.py index 195934f1e8c1a..852d6360e9d70 100755 --- a/qa/rpc-tests/listsinceblock.py +++ b/qa/rpc-tests/listsinceblock.py @@ -62,7 +62,7 @@ def run_test 
(self): # generate on both sides lastblockhash = self.nodes[1].generate(6)[5] self.nodes[2].generate(7) - print('lastblockhash=%s' % (lastblockhash)) + self.log.info('lastblockhash=%s' % (lastblockhash)) self.sync_all() diff --git a/qa/rpc-tests/maxblocksinflight.py b/qa/rpc-tests/maxblocksinflight.py index 116e87177c136..9eaa254649fd9 100755 --- a/qa/rpc-tests/maxblocksinflight.py +++ b/qa/rpc-tests/maxblocksinflight.py @@ -13,7 +13,6 @@ from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * -import logging MAX_REQUESTS = 128 @@ -33,7 +32,6 @@ def on_close(self, conn): def __init__(self): NodeConnCB.__init__(self) - self.log = logging.getLogger("BlockRelayTest") def add_new_connection(self, connection): self.connection = connection @@ -65,7 +63,7 @@ def run(self): raise AssertionError("Error, test failed: block %064x requested more than once" % key) if total_requests > MAX_REQUESTS: raise AssertionError("Error, too many blocks (%d) requested" % total_requests) - print("Round %d: success (total requests: %d)" % (count, total_requests)) + self.log.info("Round %d: success (total requests: %d)" % (count, total_requests)) self.disconnectOkay = True self.connection.disconnect_node() @@ -84,11 +82,13 @@ def __init__(self): def setup_network(self): self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - extra_args=[['-debug', '-whitelist=127.0.0.1']], + extra_args=[['-whitelist=127.0.0.1']], binary=[self.options.testbinary]) def run_test(self): test = TestManager() + # pass log handler through to the test manager object + test.log = self.log test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test)) NetworkThread().start() # Start up network handling in another thread test.run() diff --git a/qa/rpc-tests/maxuploadtarget.py b/qa/rpc-tests/maxuploadtarget.py index df4450bc84b23..1500e24b9961e 100755 --- a/qa/rpc-tests/maxuploadtarget.py +++ b/qa/rpc-tests/maxuploadtarget.py @@ -90,7 +90,7 @@ def __init__(self): def setup_network(self): # Start a node with maxuploadtarget of 200 MB (/24h) self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000", "-maxtipage="+str(2*60*60*24*7)])) + self.nodes.append(start_node(0, self.options.tmpdir, ["-maxuploadtarget=200", "-blockmaxsize=999000", "-maxtipage="+str(2*60*60*24*7)])) def run_test(self): # Advance all nodes 2 weeks in the future @@ -166,7 +166,7 @@ def run_test(self): test_nodes[0].send_message(getdata_request) test_nodes[0].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 2) - print("Peer 0 disconnected after downloading old block too many times") + self.log.info("Peer 0 disconnected after downloading old block too many times") # Requesting the current block on test_nodes[1] should succeed indefinitely, # even when over the max upload target. @@ -177,7 +177,7 @@ def run_test(self): test_nodes[1].sync_with_ping() assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1) - print("Peer 1 able to repeatedly download new block") + self.log.info("Peer 1 able to repeatedly download new block") # But if test_nodes[1] tries for an old block, it gets disconnected too. 
getdata_request.inv = [CInv(2, big_old_block)] @@ -185,9 +185,9 @@ def run_test(self): test_nodes[1].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 1) - print("Peer 1 disconnected after trying to download old block") + self.log.info("Peer 1 disconnected after trying to download old block") - print("Advancing system time on node to clear counters...") + self.log.info("Advancing system time on node to clear counters...") # If we advance the time by 24 hours, then the counters should reset, # and test_nodes[2] should be able to retrieve the old block. @@ -197,14 +197,14 @@ def run_test(self): test_nodes[2].sync_with_ping() assert_equal(test_nodes[2].block_receive_map[big_old_block], 1) - print("Peer 2 able to download old block") + self.log.info("Peer 2 able to download old block") [c.disconnect_node() for c in connections] #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 - print("Restarting nodes with -whitelist=127.0.0.1") + self.log.info("Restarting nodes with -whitelist=127.0.0.1") stop_node(self.nodes[0], 0) - self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000", "-maxtipage="+str(2*60*60*24*7)]) + self.nodes[0] = start_node(0, self.options.tmpdir, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000", "-maxtipage="+str(2*60*60*24*7)]) #recreate/reconnect 3 test nodes test_nodes = [] @@ -230,7 +230,7 @@ def run_test(self): test_nodes[1].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist - print("Peer 1 still connected after trying to download old block (whitelisted)") + self.log.info("Peer 1 still connected after trying to download old block (whitelisted)") [c.disconnect_node() for c in connections] diff --git a/qa/rpc-tests/mempool_limit.py b/qa/rpc-tests/mempool_limit.py index e3fd7952c0fa5..aae9143fd6f60 100755 --- a/qa/rpc-tests/mempool_limit.py +++ b/qa/rpc-tests/mempool_limit.py @@ -11,7 +11,7 @@ class MempoolLimitTest(BitcoinTestFramework): def setup_network(self): self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, ["-maxmempool=5", "-spendzeroconfchange=0", "-debug"])) + self.nodes.append(start_node(0, self.options.tmpdir, ["-maxmempool=5", "-spendzeroconfchange=0"])) self.is_network_split = False self.sync_all() self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] diff --git a/qa/rpc-tests/mempool_packages.py b/qa/rpc-tests/mempool_packages.py index bd9bccf2de174..96aad919b2484 100755 --- a/qa/rpc-tests/mempool_packages.py +++ b/qa/rpc-tests/mempool_packages.py @@ -19,8 +19,8 @@ def __init__(self): def setup_network(self): self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug"])) - self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-limitancestorcount=5", "-debug"])) + self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000"])) + self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-limitancestorcount=5"])) connect_nodes(self.nodes[0], 1) self.is_network_split = False self.sync_all() @@ -127,7 +127,7 @@ def run_test(self): try: self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1) except JSONRPCException as e: - print("too-long-ancestor-chain successfully rejected") + self.log.info("too-long-ancestor-chain successfully rejected") # Check that prioritising a tx before it's added to the mempool works # First clear the mempool by 
mining a block. @@ -177,9 +177,9 @@ def run_test(self): mempool = self.nodes[0].getrawmempool(True) assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS) except JSONRPCException as e: - print(e.error['message']) + self.log.info(e.error['message']) assert_equal(i, MAX_DESCENDANTS - 1) - print("tx that would create too large descendant package successfully rejected") + self.log.info("tx that would create too large descendant package successfully rejected") # TODO: check that node1's mempool is as expected diff --git a/qa/rpc-tests/mempool_reorg.py b/qa/rpc-tests/mempool_reorg.py index f8be8c284ad04..9b64610d61d52 100755 --- a/qa/rpc-tests/mempool_reorg.py +++ b/qa/rpc-tests/mempool_reorg.py @@ -21,7 +21,7 @@ def __init__(self): alert_filename = None # Set by setup_network def setup_network(self): - args = ["-checkmempool", "-debug=mempool"] + args = ["-checkmempool"] self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, args)) self.nodes.append(start_node(1, self.options.tmpdir, args)) diff --git a/qa/rpc-tests/mempool_resurrect_test.py b/qa/rpc-tests/mempool_resurrect_test.py index 1d0be2efbc771..7ff8bbea6fc2c 100755 --- a/qa/rpc-tests/mempool_resurrect_test.py +++ b/qa/rpc-tests/mempool_resurrect_test.py @@ -17,7 +17,7 @@ def __init__(self): def setup_network(self): # Just need one node for this test - args = ["-checkmempool", "-debug=mempool"] + args = ["-checkmempool"] self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, args)) self.is_network_split = False diff --git a/qa/rpc-tests/mempool_spendcoinbase.py b/qa/rpc-tests/mempool_spendcoinbase.py index c902de06960f7..53622e54babd5 100755 --- a/qa/rpc-tests/mempool_spendcoinbase.py +++ b/qa/rpc-tests/mempool_spendcoinbase.py @@ -25,7 +25,7 @@ def __init__(self): def setup_network(self): # Just need one node for this test - args = ["-checkmempool", "-debug=mempool"] + args = ["-checkmempool"] self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, args)) self.is_network_split = False diff --git a/qa/rpc-tests/merkle_blocks.py b/qa/rpc-tests/merkle_blocks.py index 327d94ee23d41..a43eccfa095ea 100755 --- a/qa/rpc-tests/merkle_blocks.py +++ b/qa/rpc-tests/merkle_blocks.py @@ -17,11 +17,11 @@ def __init__(self): def setup_network(self): self.nodes = [] # Nodes 0/1 are "wallet" nodes - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"])) - self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"])) + self.nodes.append(start_node(0, self.options.tmpdir)) + self.nodes.append(start_node(1, self.options.tmpdir)) # Nodes 2/3 are used for testing - self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"])) - self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"])) + self.nodes.append(start_node(2, self.options.tmpdir)) + self.nodes.append(start_node(3, self.options.tmpdir, ["-txindex"])) connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[0], 2) connect_nodes(self.nodes[0], 3) @@ -30,7 +30,7 @@ def setup_network(self): self.sync_all() def run_test(self): - print("Mining blocks...") + self.log.info("Mining blocks...") self.nodes[0].generate(105) self.sync_all() diff --git a/qa/rpc-tests/multikeysporks.py b/qa/rpc-tests/multikeysporks.py index 8d4f8c38e670e..e6bb77eb19b53 100755 --- a/qa/rpc-tests/multikeysporks.py +++ b/qa/rpc-tests/multikeysporks.py @@ -51,7 +51,7 @@ def setup_network(self): # address(base58): yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui self.nodes.append(start_node(0, self.options.tmpdir, - ["-debug", 
"-sporkkey=931wyuRNVYvhg18Uu9bky5Qg1z4QbxaJ7fefNBzjBPiLRqcd33F", + ["-sporkkey=931wyuRNVYvhg18Uu9bky5Qg1z4QbxaJ7fefNBzjBPiLRqcd33F", "-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7", "-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h", "-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa", @@ -59,7 +59,7 @@ def setup_network(self): "-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui", "-minsporkkeys=3"])) self.nodes.append(start_node(1, self.options.tmpdir, - ["-debug", "-sporkkey=91vbXGMSWKGHom62986XtL1q2mQDA12ngcuUNNe5NfMSj44j7g3", + ["-sporkkey=91vbXGMSWKGHom62986XtL1q2mQDA12ngcuUNNe5NfMSj44j7g3", "-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7", "-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h", "-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa", @@ -67,7 +67,7 @@ def setup_network(self): "-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui", "-minsporkkeys=3"])) self.nodes.append(start_node(2, self.options.tmpdir, - ["-debug", "-sporkkey=92bxUjPT5AhgXuXJwfGGXqhomY2SdQ55MYjXyx9DZNxCABCSsRH", + ["-sporkkey=92bxUjPT5AhgXuXJwfGGXqhomY2SdQ55MYjXyx9DZNxCABCSsRH", "-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7", "-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h", "-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa", @@ -75,7 +75,7 @@ def setup_network(self): "-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui", "-minsporkkeys=3"])) self.nodes.append(start_node(3, self.options.tmpdir, - ["-debug", "-sporkkey=934yPXiVGf4RCY2qTs2Bt5k3TEtAiAg12sMxCt8yVWbSU7p3fuD", + ["-sporkkey=934yPXiVGf4RCY2qTs2Bt5k3TEtAiAg12sMxCt8yVWbSU7p3fuD", "-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7", "-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h", "-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa", @@ -83,7 +83,7 @@ def setup_network(self): "-sporkaddr=yc5TGfcHYoLCrcbVy4umsiDjsYUn39vLui", "-minsporkkeys=3"])) self.nodes.append(start_node(4, self.options.tmpdir, - ["-debug", "-sporkkey=92Cxwia363Wg2qGF1fE5z4GKi8u7r1nrWQXdtsj2ACZqaDPSihD", + ["-sporkkey=92Cxwia363Wg2qGF1fE5z4GKi8u7r1nrWQXdtsj2ACZqaDPSihD", "-sporkaddr=ygcG5S2pQz2U1UAaHvU6EznKZW7yapKMA7", "-sporkaddr=yfLSXFfipnkgYioD6L8aUNyfRgEBuJv48h", "-sporkaddr=yNsMZhEhYqv14TgdYb1NS2UmNZjE8FSJxa", diff --git a/qa/rpc-tests/nulldummy.py b/qa/rpc-tests/nulldummy.py index af7c086e6e20b..22d1f63e01b77 100755 --- a/qa/rpc-tests/nulldummy.py +++ b/qa/rpc-tests/nulldummy.py @@ -44,7 +44,7 @@ def __init__(self): def setup_network(self): # Must set the blockversion for this test self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - extra_args=[['-debug', '-whitelist=127.0.0.1']]) + extra_args=[['-whitelist=127.0.0.1']]) def run_test(self): self.address = self.nodes[0].getnewaddress() @@ -61,29 +61,29 @@ def run_test(self): self.lastblockheight = 429 self.lastblocktime = get_mocktime() + 429 - print ("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]") + self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]") test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)] txid1 = self.tx_submit(self.nodes[0], test1txs[0]) test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48)) txid2 = self.tx_submit(self.nodes[0], test1txs[1]) self.block_submit(self.nodes[0], test1txs, True) - print ("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") + self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") test2tx = 
self.create_transaction(self.nodes[0], txid2, self.ms_address, 47) trueDummy(test2tx) txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR) - print ("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") + self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") self.block_submit(self.nodes[0], [test2tx], True) - print ("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation") + self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation") test4tx = self.create_transaction(self.nodes[0], txid4, self.address, 46) test6txs=[CTransaction(test4tx)] trueDummy(test4tx) self.tx_submit(self.nodes[0], test4tx, NULLDUMMY_ERROR) self.block_submit(self.nodes[0], [test4tx]) - print ("Test 6: NULLDUMMY compliant transactions should be accepted to mempool and in block after activation [432]") + self.log.info("Test 6: NULLDUMMY compliant transactions should be accepted to mempool and in block after activation [432]") for i in test6txs: self.tx_submit(self.nodes[0], i) self.block_submit(self.nodes[0], test6txs, True) diff --git a/qa/rpc-tests/p2p-acceptblock.py b/qa/rpc-tests/p2p-acceptblock.py index 55e65ce435d77..dfd463d5d62c3 100755 --- a/qa/rpc-tests/p2p-acceptblock.py +++ b/qa/rpc-tests/p2p-acceptblock.py @@ -119,10 +119,10 @@ def setup_network(self): # from peers which are not whitelisted, while Node1 will be used for # the whitelisted case. self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"], + self.nodes.append(start_node(0, self.options.tmpdir, binary=self.options.testbinary)) self.nodes.append(start_node(1, self.options.tmpdir, - ["-debug", "-whitelist=127.0.0.1"], + ["-whitelist=127.0.0.1"], binary=self.options.testbinary)) def run_test(self): @@ -160,7 +160,7 @@ def run_test(self): [ x.sync_with_ping() for x in [test_node, white_node] ] assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 2) - print("First height 2 block accepted by both nodes") + self.log.info("First height 2 block accepted by both nodes") # 3. Send another block that builds on the original tip. blocks_h2f = [] # Blocks at height 2 that fork off the main chain @@ -179,7 +179,7 @@ def run_test(self): if x['hash'] == blocks_h2f[1].hash: assert_equal(x['status'], "valid-headers") - print("Second height 2 block accepted only from whitelisted peer") + self.log.info("Second height 2 block accepted only from whitelisted peer") # 4. Now send another block that builds on the forking chain. blocks_h3 = [] @@ -198,11 +198,11 @@ def run_test(self): # But this block should be accepted by node0 since it has more work. self.nodes[0].getblock(blocks_h3[0].hash) - print("Unrequested more-work block accepted from non-whitelisted peer") + self.log.info("Unrequested more-work block accepted from non-whitelisted peer") # Node1 should have accepted and reorged. assert_equal(self.nodes[1].getblockcount(), 3) - print("Successfully reorged to length 3 chain from whitelisted peer") + self.log.info("Successfully reorged to length 3 chain from whitelisted peer") # 4b. Now mine 288 more blocks and deliver; all should be processed but # the last (height-too-high) on node0. 
Node1 should process the tip if @@ -233,7 +233,7 @@ def run_test(self): white_node.send_message(msg_block(tips[1])) # Now deliver the tip white_node.sync_with_ping() self.nodes[1].getblock(tips[1].hash) - print("Unrequested block far ahead of tip accepted from whitelisted peer") + self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer") # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more @@ -247,7 +247,7 @@ def run_test(self): # a getdata request for this block. test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) - print("Unrequested block that would complete more-work chain was ignored") + self.log.info("Unrequested block that would complete more-work chain was ignored") # 6. Try to get node to request the missing block. # Poke the node with an inv for block at height 3 and see if that @@ -263,14 +263,14 @@ def run_test(self): # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256) - print("Inv at tip triggered getdata for unprocessed block") + self.log.info("Inv at tip triggered getdata for unprocessed block") # 7. Send the missing block for the third time (now it is requested) test_node.send_message(msg_block(blocks_h2f[0])) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) - print("Successfully reorged to longer chain from non-whitelisted peer") + self.log.info("Successfully reorged to longer chain from non-whitelisted peer") [ c.disconnect_node() for c in connections ] diff --git a/qa/rpc-tests/p2p-compactblocks.py b/qa/rpc-tests/p2p-compactblocks.py index 2e3e123b5ae8e..56990612d4e2e 100755 --- a/qa/rpc-tests/p2p-compactblocks.py +++ b/qa/rpc-tests/p2p-compactblocks.py @@ -117,8 +117,8 @@ def setup_network(self): # Start up node0 to be a version 1, pre-segwit node. self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - [["-debug", "-logtimemicros=1", "-txindex"], - ["-debug", "-logtimemicros", "-txindex"]]) + [["-txindex"], + ["-txindex"]]) connect_nodes(self.nodes[0], 1) def build_block_on_tip(self, node): @@ -773,70 +773,70 @@ def run_test(self): # We will need UTXOs to construct transactions in later tests. self.make_utxos() - print("Running tests:") + self.log.info("Running tests:") - print("\tTesting SENDCMPCT p2p message... ") + self.log.info("Testing SENDCMPCT p2p message... ") self.test_sendcmpct(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_sendcmpct(self.nodes[1], self.second_node, 1) sync_blocks(self.nodes) - print("\tTesting compactblock construction...") + self.log.info("Testing compactblock construction...") self.test_compactblock_construction(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_compactblock_construction(self.nodes[1], self.second_node, 1) sync_blocks(self.nodes) - print("\tTesting compactblock requests... ") + self.log.info("Testing compactblock requests... 
") self.test_compactblock_requests(self.nodes[0], self.test_node) sync_blocks(self.nodes) self.test_compactblock_requests(self.nodes[1], self.second_node) sync_blocks(self.nodes) - print("\tTesting getblocktxn requests...") + self.log.info("Testing getblocktxn requests...") self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_requests(self.nodes[1], self.second_node, 1) sync_blocks(self.nodes) - print("\tTesting getblocktxn handler...") + self.log.info("Testing getblocktxn handler...") self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_handler(self.nodes[1], self.second_node, 1) self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) sync_blocks(self.nodes) - print("\tTesting compactblock requests/announcements not at chain tip...") + self.log.info("Testing compactblock requests/announcements not at chain tip...") self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) sync_blocks(self.nodes) self.test_compactblocks_not_at_tip(self.nodes[1], self.second_node) self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) sync_blocks(self.nodes) - print("\tTesting handling of incorrect blocktxn responses...") + self.log.info("Testing handling of incorrect blocktxn responses...") self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_incorrect_blocktxn_response(self.nodes[1], self.second_node, 1) sync_blocks(self.nodes) # End-to-end block relay tests - print("\tTesting end-to-end block relay...") + self.log.info("Testing end-to-end block relay...") self.request_cb_announcements(self.test_node, self.nodes[0], 1) self.request_cb_announcements(self.old_node, self.nodes[1], 1) self.request_cb_announcements(self.second_node, self.nodes[1], 1) self.test_end_to_end_block_relay(self.nodes[0], [self.second_node, self.test_node, self.old_node]) self.test_end_to_end_block_relay(self.nodes[1], [self.second_node, self.test_node, self.old_node]) - print("\tTesting handling of invalid compact blocks...") + self.log.info("Testing handling of invalid compact blocks...") self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node) self.test_invalid_tx_in_compactblock(self.nodes[1], self.second_node) self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node) - print("\tTesting reconstructing compact blocks from all peers...") + self.log.info("Testing reconstructing compact blocks from all peers...") self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.second_node, self.old_node) sync_blocks(self.nodes) - print("\tTesting invalid index in cmpctblock message...") + self.log.info("Testing invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() diff --git a/qa/rpc-tests/p2p-instantsend.py b/qa/rpc-tests/p2p-instantsend.py index 96539191963ba..67d4598e92afe 100755 --- a/qa/rpc-tests/p2p-instantsend.py +++ b/qa/rpc-tests/p2p-instantsend.py @@ -69,7 +69,7 @@ def test_doublespend(self): # start last node self.nodes[self.isolated_idx] = start_node(self.isolated_idx, self.options.tmpdir, - ["-debug"] + self.extra_args) + self.extra_args) # send doublespend transaction to isolated node self.nodes[self.isolated_idx].sendrawtransaction(dblspnd_tx['hex']) # generate block on isolated node with doublespend transaction diff --git a/qa/rpc-tests/p2p-leaktests.py b/qa/rpc-tests/p2p-leaktests.py index debadc7485d95..44bbc5b0d4029 100755 --- a/qa/rpc-tests/p2p-leaktests.py +++ 
b/qa/rpc-tests/p2p-leaktests.py @@ -32,7 +32,7 @@ def send_message(self, message): def bad_message(self, message): self.unexpected_msg = True - print("should not have received message: %s" % message.command) + self.log.info("should not have received message: %s" % message.command) def on_open(self, conn): self.connected = True @@ -101,7 +101,7 @@ def __init__(self): super().__init__() self.num_nodes = 1 def setup_network(self): - extra_args = [['-debug', '-banscore='+str(banscore)] + extra_args = [['-banscore='+str(banscore)] for i in range(self.num_nodes)] self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args) diff --git a/qa/rpc-tests/p2p-mempool.py b/qa/rpc-tests/p2p-mempool.py index c6b149021151f..0aa9c90e8f3d8 100755 --- a/qa/rpc-tests/p2p-mempool.py +++ b/qa/rpc-tests/p2p-mempool.py @@ -85,7 +85,7 @@ def __init__(self): def setup_network(self): # Start a node with maxuploadtarget of 200 MB (/24h) self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-peerbloomfilters=0"])) + self.nodes.append(start_node(0, self.options.tmpdir, ["-peerbloomfilters=0"])) def run_test(self): #connect a mininode diff --git a/qa/rpc-tests/p2p-timeouts.py b/qa/rpc-tests/p2p-timeouts.py index 7f596b6e4b4bf..498acb23fec53 100755 --- a/qa/rpc-tests/p2p-timeouts.py +++ b/qa/rpc-tests/p2p-timeouts.py @@ -53,8 +53,7 @@ def setup_network(self): self.nodes = [] # Start up node0 to be a version 1, pre-segwit node. - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - [["-debug", "-logtimemicros=1"]]) + self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) def run_test(self): # Setup the p2p connections and start up the network thread. diff --git a/qa/rpc-tests/p2p-versionbits-warning.py b/qa/rpc-tests/p2p-versionbits-warning.py index 8e3e361fc17a8..dc714e9a4a3f6 100755 --- a/qa/rpc-tests/p2p-versionbits-warning.py +++ b/qa/rpc-tests/p2p-versionbits-warning.py @@ -72,7 +72,7 @@ def setup_network(self): # Open and close to create zero-length file with open(self.alert_filename, 'w', encoding='utf8') as _: pass - self.extra_args = [["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]] + self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]] self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args) # Send numblocks blocks via peer with nVersionToUse set. 
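The -alertnotify argument kept above registers a shell command that appends each alert line to self.alert_filename, which the test pre-creates as a zero-length file. A hedged sketch of how such a test can then check that the versionbits warning actually fired (the real assertion is outside this hunk, and the expected warning text is an assumption here):

import os

def alert_file_contains(path, expected_text):
    # The file exists but stays empty until -alertnotify has fired once.
    if not os.path.exists(path):
        return False
    with open(path, 'r', encoding='utf8') as f:
        return expected_text in f.read()

# e.g.: assert alert_file_contains(self.alert_filename, "unknown new rules")

The quoting in the echo command matters: %s is substituted by the node, and the target filename is wrapped in escaped quotes so paths with spaces survive the shell.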
diff --git a/qa/rpc-tests/preciousblock.py b/qa/rpc-tests/preciousblock.py
index 2f2e40f536721..65c4d6003033a 100755
--- a/qa/rpc-tests/preciousblock.py
+++ b/qa/rpc-tests/preciousblock.py
@@ -39,13 +39,12 @@ def __init__(self):
         super().__init__()
         self.setup_clean_chain = True
         self.num_nodes = 3
-        self.extra_args = [["-debug"]] * self.num_nodes

     def setup_network(self):
         self.nodes = self.setup_nodes()

     def run_test(self):
-        print("Ensure submitblock can in principle reorg to a competing chain")
+        self.log.info("Ensure submitblock can in principle reorg to a competing chain")
         self.nodes[0].generate(1)
         assert_equal(self.nodes[0].getblockcount(), 1)
         (hashY, hashZ) = self.nodes[1].generate(2)
@@ -53,62 +52,62 @@ def run_test(self):
         node_sync_via_rpc(self.nodes[0:3])
         assert_equal(self.nodes[0].getbestblockhash(), hashZ)

-        print("Mine blocks A-B-C on Node 0")
+        self.log.info("Mine blocks A-B-C on Node 0")
         (hashA, hashB, hashC) = self.nodes[0].generate(3)
         assert_equal(self.nodes[0].getblockcount(), 5)
-        print("Mine competing blocks E-F-G on Node 1")
+        self.log.info("Mine competing blocks E-F-G on Node 1")
         (hashE, hashF, hashG) = self.nodes[1].generate(3)
         assert_equal(self.nodes[1].getblockcount(), 5)
         assert(hashC != hashG)
-        print("Connect nodes and check no reorg occurs")
+        self.log.info("Connect nodes and check no reorg occurs")
         # Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
         node_sync_via_rpc(self.nodes[0:2])
         connect_nodes_bi(self.nodes,0,1)
         assert_equal(self.nodes[0].getbestblockhash(), hashC)
         assert_equal(self.nodes[1].getbestblockhash(), hashG)
-        print("Make Node0 prefer block G")
+        self.log.info("Make Node0 prefer block G")
         self.nodes[0].preciousblock(hashG)
         assert_equal(self.nodes[0].getbestblockhash(), hashG)
-        print("Make Node0 prefer block C again")
+        self.log.info("Make Node0 prefer block C again")
         self.nodes[0].preciousblock(hashC)
         assert_equal(self.nodes[0].getbestblockhash(), hashC)
-        print("Make Node1 prefer block C")
+        self.log.info("Make Node1 prefer block C")
         self.nodes[1].preciousblock(hashC)
         sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
         assert_equal(self.nodes[1].getbestblockhash(), hashC)
-        print("Make Node1 prefer block G again")
+        self.log.info("Make Node1 prefer block G again")
         self.nodes[1].preciousblock(hashG)
         assert_equal(self.nodes[1].getbestblockhash(), hashG)
-        print("Make Node0 prefer block G again")
+        self.log.info("Make Node0 prefer block G again")
         self.nodes[0].preciousblock(hashG)
         assert_equal(self.nodes[0].getbestblockhash(), hashG)
-        print("Make Node1 prefer block C again")
+        self.log.info("Make Node1 prefer block C again")
         self.nodes[1].preciousblock(hashC)
         assert_equal(self.nodes[1].getbestblockhash(), hashC)
-        print("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
+        self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
         self.nodes[0].generate(1)
         assert_equal(self.nodes[0].getblockcount(), 6)
         sync_blocks(self.nodes[0:2])
         hashH = self.nodes[0].getbestblockhash()
         assert_equal(self.nodes[1].getbestblockhash(), hashH)
-        print("Node1 should not be able to prefer block C anymore")
+        self.log.info("Node1 should not be able to prefer block C anymore")
         self.nodes[1].preciousblock(hashC)
         assert_equal(self.nodes[1].getbestblockhash(), hashH)
-        print("Mine competing blocks I-J-K-L on Node 2")
+        self.log.info("Mine competing blocks I-J-K-L on Node 2")
         self.nodes[2].generate(4)
         assert_equal(self.nodes[2].getblockcount(), 6)
         hashL = self.nodes[2].getbestblockhash()
-        print("Connect nodes and check no reorg occurs")
+        self.log.info("Connect nodes and check no reorg occurs")
         node_sync_via_rpc(self.nodes[1:3])
         connect_nodes_bi(self.nodes,1,2)
         connect_nodes_bi(self.nodes,0,2)
         assert_equal(self.nodes[0].getbestblockhash(), hashH)
         assert_equal(self.nodes[1].getbestblockhash(), hashH)
         assert_equal(self.nodes[2].getbestblockhash(), hashL)
-        print("Make Node1 prefer block L")
+        self.log.info("Make Node1 prefer block L")
         self.nodes[1].preciousblock(hashL)
         assert_equal(self.nodes[1].getbestblockhash(), hashL)
-        print("Make Node2 prefer block H")
+        self.log.info("Make Node2 prefer block H")
         self.nodes[2].preciousblock(hashH)
         assert_equal(self.nodes[2].getbestblockhash(), hashH)
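preciousblock, exercised throughout this test, only changes which of two equal-work tips the node prefers, as if the precious block had been received first; it cannot force a reorg to a strictly lower-work chain, which is why hashC stops winning once H extends E-F-G. In RPC terms the whole mechanism reduces to (node standing for any RPC proxy, as in the hunks above):

# hashG and hashC are competing same-height tips.
node.preciousblock(hashG)                  # tie-break in favour of G
assert node.getbestblockhash() == hashG
node.preciousblock(hashC)                  # and back again
assert node.getbestblockhash() == hashC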
diff --git a/qa/rpc-tests/prioritise_transaction.py b/qa/rpc-tests/prioritise_transaction.py
index 8baf6f35e7c70..5d34ca325efbf 100755
--- a/qa/rpc-tests/prioritise_transaction.py
+++ b/qa/rpc-tests/prioritise_transaction.py
@@ -21,8 +21,8 @@ def setup_network(self):
         self.nodes = []
         self.is_network_split = False

-        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1"]))
-        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-printpriority=1"])) # TODO move this to extra_args when Bitcoin #10198 gets backported
+        self.nodes.append(start_node(0, self.options.tmpdir, ["-printpriority=1"]))
+        self.nodes.append(start_node(1, self.options.tmpdir, ["-printpriority=1"])) # TODO move this to extra_args when Bitcoin #10198 gets backported
         connect_nodes(self.nodes[0], 1) # TODO remove this when Bitcoin #10198 gets backported

         self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
@@ -60,7 +60,7 @@ def run_test(self):
         self.nodes[0].generate(1)

         mempool = self.nodes[0].getrawmempool()
-        print("Assert that prioritised transaction was mined")
+        self.log.info("Assert that prioritised transaction was mined")
         assert(txids[0][0] not in mempool)
         assert(txids[0][1] in mempool)
@@ -92,7 +92,7 @@ def run_test(self):
         # High fee transaction should not have been mined, but other high fee rate
         # transactions should have been.
         mempool = self.nodes[0].getrawmempool()
-        print("Assert that de-prioritised transaction is still in mempool")
+        self.log.info("Assert that de-prioritised transaction is still in mempool")
         assert(high_fee_tx in mempool)
         for x in txids[2]:
             if (x != high_fee_tx):
@@ -134,7 +134,7 @@ def run_test(self):
         # accepted.
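The call that follows passes its fee delta in base units (satoshis/duffs), not in coin units, which is easy to get wrong. A minimal sketch of the conversion, reusing the relayfee the test fetched from getnetworkinfo() (constants here are illustrative):

from decimal import Decimal

COIN = 100000000                      # base units per coin, as in the framework
relayfee = Decimal("0.00001000")      # e.g. getnetworkinfo()['relayfee']
fee_delta = int(relayfee * COIN)      # 1000 base units
# prioritisetransaction(txid, priority_delta, fee_delta): a zero priority
# delta plus a positive fee delta makes a free tx look like it pays relayfee.
# self.nodes[0].prioritisetransaction(tx2_id, 0, fee_delta)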
self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN)) - print("Assert that prioritised free transaction is accepted to mempool") + self.log.info("Assert that prioritised free transaction is accepted to mempool") assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id) assert(tx2_id in self.nodes[0].getrawmempool()) diff --git a/qa/rpc-tests/proxy_test.py b/qa/rpc-tests/proxy_test.py index e4e231f3120a0..6b2a8ed1c7cf2 100755 --- a/qa/rpc-tests/proxy_test.py +++ b/qa/rpc-tests/proxy_test.py @@ -83,13 +83,13 @@ def setup_nodes(self): # Note: proxies are not used to connect to local nodes # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost args = [ - ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'], - ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'], - ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'], + ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'], + ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'], + ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'], [] ] if self.have_ipv6: - args[3] = ['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion'] + args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion'] return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args) def node_test(self, node, proxies, auth, test_onion=True): diff --git a/qa/rpc-tests/pruning.py b/qa/rpc-tests/pruning.py index 6d778cfc5e857..cc84c8c085728 100755 --- a/qa/rpc-tests/pruning.py +++ b/qa/rpc-tests/pruning.py @@ -41,19 +41,19 @@ def setup_network(self): self.is_network_split = False # Create nodes 0 and 1 to mine - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)) - self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)) + self.nodes.append(start_node(0, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)) + self.nodes.append(start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)) # Create node 2 to test pruning - self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900)) + self.nodes.append(start_node(2, self.options.tmpdir, ["-maxreceivebuffer=20000","-prune=550"], timewait=900)) self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/" # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) - self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900)) - self.nodes.append(start_node(4, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900)) + self.nodes.append(start_node(3, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900)) + self.nodes.append(start_node(4, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900)) # Create nodes 5 to test wallet in prune mode, but do not connect - 
self.nodes.append(start_node(5, self.options.tmpdir, ["-debug=0", "-prune=550"])) + self.nodes.append(start_node(5, self.options.tmpdir, ["-prune=550"])) # Determine default relay fee self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] @@ -79,9 +79,9 @@ def create_big_chain(self): def test_height_min(self): if not os.path.isfile(self.prunedir+"blk00000.dat"): raise AssertionError("blk00000.dat is missing, pruning too early") - print("Success") - print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir)) - print("Mining 25 more blocks should cause the first block file to be pruned") + self.log.info("Success") + self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir)) + self.log.info("Mining 25 more blocks should cause the first block file to be pruned") # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this for i in range(25): mine_large_block(self.nodes[0], self.utxo_cache_0) @@ -92,22 +92,22 @@ def test_height_min(self): if time.time() - waitstart > 30: raise AssertionError("blk00000.dat not pruned when it should be") - print("Success") + self.log.info("Success") usage = calc_usage(self.prunedir) - print("Usage should be below target:", usage) + self.log.info("Usage should be below target: %d" % usage) if (usage > 550): raise AssertionError("Pruning target not being met") def create_chain_with_staleblocks(self): # Create stale blocks in manageable sized chunks - print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") + self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") for j in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine self.stop_node(0) - self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900) + self.nodes[0]=start_node(0, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900) # Mine 24 blocks in node 1 for i in range(24): if j == 0: @@ -124,7 +124,7 @@ def create_chain_with_staleblocks(self): connect_nodes(self.nodes[2], 0) sync_blocks(self.nodes[0:3]) - print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir)) + self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir)) def reorg_test(self): # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip @@ -132,14 +132,14 @@ def reorg_test(self): # Reboot node 1 to clear its mempool (hopefully make the invalidate faster) # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks) self.stop_node(1) - self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900) + self.nodes[1]=start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900) height = self.nodes[1].getblockcount() - print("Current block height:", height) + 
self.log.info("Current block height: %d" % height) invalidheight = height-287 badhash = self.nodes[1].getblockhash(invalidheight) - print("Invalidating block at height:",invalidheight,badhash) + self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight)) self.nodes[1].invalidateblock(badhash) # We've now switched to our previously mined-24 block fork on node 1, but thats not what we want @@ -151,24 +151,24 @@ def reorg_test(self): curhash = self.nodes[1].getblockhash(invalidheight - 1) assert(self.nodes[1].getblockcount() == invalidheight - 1) - print("New best height", self.nodes[1].getblockcount()) + self.log.info("New best height: %d" % self.nodes[1].getblockcount()) # Reboot node1 to clear those giant tx's from mempool self.stop_node(1) - self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900) + self.nodes[1]=start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900) - print("Generating new longer chain of 300 more blocks") + self.log.info("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) - print("Reconnect nodes") + self.log.info("Reconnect nodes") connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[2], 1) sync_blocks(self.nodes[0:3], timeout=120) - print("Verify height on node 2:",self.nodes[2].getblockcount()) - print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir)) + self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount()) + self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir)) - print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)") + self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)") for i in range(22): # This can be slow, so do this in multiple RPC calls to avoid # RPC timeouts. @@ -176,7 +176,7 @@ def reorg_test(self): sync_blocks(self.nodes[0:3], timeout=300) usage = calc_usage(self.prunedir) - print("Usage should be below target:", usage) + self.log.info("Usage should be below target: %d" % usage) if (usage > 550): raise AssertionError("Pruning target not being met") @@ -185,7 +185,7 @@ def reorg_test(self): def reorg_back(self): # Verify that a block on the old main chain fork has been pruned away assert_raises_jsonrpc(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash) - print("Will need to redownload block",self.forkheight) + self.log.info("Will need to redownload block %d" % self.forkheight) # Verify that we have enough history to reorg back to the fork point # Although this is more than 288 blocks, because this chain was written more recently @@ -209,14 +209,14 @@ def reorg_back(self): # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg if self.nodes[2].getblockcount() < self.mainchainheight: blocks_to_mine = first_reorg_height + 1 - self.mainchainheight - print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine) + self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. 
Blocks needed: %d" % blocks_to_mine) self.nodes[0].invalidateblock(curchainhash) assert(self.nodes[0].getblockcount() == self.mainchainheight) assert(self.nodes[0].getbestblockhash() == self.mainchainhash2) goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] goalbestheight = first_reorg_height + 1 - print("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") + self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") waitstart = time.time() while self.nodes[2].getblockcount() < goalbestheight: time.sleep(0.1) @@ -228,13 +228,13 @@ def reorg_back(self): def manual_test(self, node_number, use_timestamp): # at this point, node has 995 blocks and has not yet run in prune mode - node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0"], timewait=900) + node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, timewait=900) assert_equal(node.getblockcount(), 995) assert_raises_jsonrpc(-1, "not in prune mode", node.pruneblockchain, 500) self.stop_node(node_number) # now re-start in manual pruning mode - node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=1"], timewait=900) + node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-prune=1"], timewait=900) assert_equal(node.getblockcount(), 995) def height(index): @@ -308,30 +308,30 @@ def has_block(index): # stop node, start back up with auto-prune at 550MB, make sure still runs self.stop_node(node_number) - self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=550"], timewait=900) + self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-prune=550"], timewait=900) - print("Success") + self.log.info("Success") def wallet_test(self): # check that the pruning node's wallet is still in good shape - print("Stop and start pruning node to trigger wallet rescan") + self.log.info("Stop and start pruning node to trigger wallet rescan") self.stop_node(2) - start_node(2, self.options.tmpdir, ["-debug=1","-prune=550"]) - print("Success") + start_node(2, self.options.tmpdir, ["-prune=550"]) + self.log.info("Success") # check that wallet loads loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. - print ("Syncing node 5 to test wallet") + self.log.info("Syncing node 5 to test wallet") connect_nodes(self.nodes[0], 5) nds = [self.nodes[0], self.nodes[5]] sync_blocks(nds, wait=5, timeout=300) self.stop_node(5) #stop and start to trigger rescan - start_node(5, self.options.tmpdir, ["-debug=1","-prune=550"]) - print ("Success") + start_node(5, self.options.tmpdir, ["-prune=550"]) + self.log.info("Success") def run_test(self): - print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)") - print("Mining a big blockchain of 995 blocks") + self.log.info("Warning! 
+        self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
+        self.log.info("Mining a big blockchain of 995 blocks")
         self.create_big_chain()
         # Chain diagram key:
         # *   blocks on main chain
@@ -346,12 +346,12 @@ def run_test(self):
         self.stop_node(3)
         self.stop_node(4)

-        print("Check that we haven't started pruning yet because we're below PruneAfterHeight")
+        self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
         self.test_height_min()
         # Extend this chain past the PruneAfterHeight
         # N0=N1=N2 **...*(1020)

-        print("Check that we'll exceed disk space target if we have a very high stale block rate")
+        self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
         self.create_chain_with_staleblocks()
         # Disconnect N0
         # And mine a 24 block chain on N1 and a separate 25 block chain on N0
@@ -375,7 +375,7 @@ def run_test(self):
         self.mainchainheight = self.nodes[2].getblockcount()   #1320
         self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)

-        print("Check that we can survive a 288 block reorg still")
+        self.log.info("Check that we can survive a 288 block reorg still")
         (self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
         # Now create a 288 block reorg by mining a longer chain on N1
         # First disconnect N1
@@ -408,7 +408,7 @@ def run_test(self):
         #                     \
         #                      *...**(1320)

-        print("Test that we can rerequest a block we previously pruned if needed for a reorg")
+        self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
         self.reorg_back()
         # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
         # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
@@ -428,16 +428,16 @@ def run_test(self):
         #
         # N1 doesn't change because 1033 on main chain (*) is invalid

-        print("Test manual pruning with block indices")
+        self.log.info("Test manual pruning with block indices")
         self.manual_test(3, use_timestamp=False)

-        print("Test manual pruning with timestamps")
+        self.log.info("Test manual pruning with timestamps")
         self.manual_test(4, use_timestamp=True)

-        print("Test wallet re-scan")
+        self.log.info("Test wallet re-scan")
         self.wallet_test()

-        print("Done")
+        self.log.info("Done")

 if __name__ == '__main__':
     PruneTest().main()
diff --git a/qa/rpc-tests/reindex.py b/qa/rpc-tests/reindex.py
index 1b547a920f5b8..0cebb0466f72f 100755
--- a/qa/rpc-tests/reindex.py
+++ b/qa/rpc-tests/reindex.py
@@ -31,12 +31,12 @@ def reindex(self, justchainstate=False):
         self.nodes[0].generate(3)
         blockcount = self.nodes[0].getblockcount()
         stop_nodes(self.nodes)
-        extra_args = [["-debug", "-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
+        extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
         self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
         while self.nodes[0].getblockcount() < blockcount:
             time.sleep(0.1)
         assert_equal(self.nodes[0].getblockcount(), blockcount)
-        print("Success")
+        self.log.info("Success")

     def run_test(self):
         self.reindex(False)
diff --git a/qa/rpc-tests/rest.py b/qa/rpc-tests/rest.py
index 5d6b755732a7e..b3519f0987d38 100755
--- a/qa/rpc-tests/rest.py
+++ b/qa/rpc-tests/rest.py
@@ -58,7 +58,7 @@ def setup_network(self, split=False):

     def run_test(self):
         url = urllib.parse.urlparse(self.nodes[0].url)
-        print("Mining blocks...")
+        self.log.info("Mining blocks...")
         self.nodes[0].generate(1)
         self.sync_all()
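reindex.py above polls getblockcount in an open-ended loop; a bounded variant is safer against a wedged node. This is a sketch only, and the qa framework may already ship an equivalent wait_until helper:

import time

def wait_until(predicate, timeout=60, poll_interval=0.1):
    # Poll until predicate() is truthy; return False rather than spin forever.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(poll_interval)
    return False

# e.g.: assert wait_until(lambda: node.getblockcount() >= blockcount, timeout=300)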
diff --git a/qa/rpc-tests/rpcbind_test.py b/qa/rpc-tests/rpcbind_test.py
index 499fe33679046..220bf4ddd072b 100755
--- a/qa/rpc-tests/rpcbind_test.py
+++ b/qa/rpc-tests/rpcbind_test.py
@@ -61,7 +61,7 @@ def run_test(self):
                 break
         if non_loopback_ip is None:
             assert(not 'This test requires at least one non-loopback IPv4 interface')
-        print("Using interface %s for testing" % non_loopback_ip)
+        self.log.info("Using interface %s for testing" % non_loopback_ip)

         defaultport = rpc_port(0)
diff --git a/qa/rpc-tests/sendheaders.py b/qa/rpc-tests/sendheaders.py
index 0f0bf87cf620c..f03042de74a10 100755
--- a/qa/rpc-tests/sendheaders.py
+++ b/qa/rpc-tests/sendheaders.py
@@ -229,7 +229,7 @@ def __init__(self):

     def setup_network(self):
         self.nodes = []
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
+        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
         connect_nodes(self.nodes[0], 1)

     # mine count blocks and return the new tip
@@ -283,7 +283,7 @@ def run_test(self):

         # PART 1
         # 1. Mine a block; expect inv announcements each time
-        print("Part 1: headers don't start before sendheaders message...")
+        self.log.info("Part 1: headers don't start before sendheaders message...")
         for i in range(4):
             old_tip = tip
             tip = self.mine_blocks(1)
@@ -314,8 +314,8 @@ def run_test(self):
             inv_node.clear_last_announcement()
             test_node.clear_last_announcement()

-        print("Part 1: success!")
-        print("Part 2: announce blocks with headers after sendheaders message...")
+        self.log.info("Part 1: success!")
+        self.log.info("Part 2: announce blocks with headers after sendheaders message...")
         # PART 2
         # 2. Send a sendheaders message and test that headers announcements
         # commence and keep working.
@@ -376,9 +376,9 @@ def run_test(self):
                 height += 1
                 block_time += 1

-        print("Part 2: success!")
+        self.log.info("Part 2: success!")

-        print("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
+        self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")

         # PART 3.  Headers announcements can stop after large reorg, and resume after
         # getheaders or inv from peer.
@@ -440,9 +440,9 @@ def run_test(self):
             assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
             assert_equal(test_node.check_last_announcement(headers=[tip]), True)

-        print("Part 3: success!")
+        self.log.info("Part 3: success!")

-        print("Part 4: Testing direct fetch behavior...")
+        self.log.info("Part 4: Testing direct fetch behavior...")
         tip = self.mine_blocks(1)
         height = self.nodes[0].getblockcount() + 1
         last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
@@ -523,12 +523,12 @@ def run_test(self):
         with mininode_lock:
             assert_equal(test_node.last_getdata, None)

-        print("Part 4: success!")
+        self.log.info("Part 4: success!")

         # Now deliver all those blocks we announced.
         [ test_node.send_message(msg_block(x)) for x in blocks ]

-        print("Part 5: Testing handling of unconnecting headers")
+        self.log.info("Part 5: Testing handling of unconnecting headers")
         # First we test that receipt of an unconnecting header doesn't prevent
         # chain sync.
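Part 5 feeds the node an "unconnecting" header, i.e. a headers message whose parent the node has never seen. With the mininode primitives this file already uses, the shape of that probe is roughly the following; this is a sketch of the test's intent, not a verbatim excerpt:

headers_msg = msg_headers()
headers_msg.headers = [CBlockHeader(blocks[1])]   # parent blocks[0] withheld
test_node.send_message(headers_msg)
# The expected reaction is a getheaders from the node to bridge the gap,
# rather than the peer being ignored, so header sync still completes.

The real loop also cares about how many such headers are tolerated before the peer is penalised, which is what the hunks that follow exercise.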
for i in range(10): @@ -595,7 +595,7 @@ def run_test(self): with mininode_lock: self.last_getheaders = True - print("Part 5: success!") + self.log.info("Part 5: success!") # Finally, check that the inv node never received a getdata request, # throughout the test diff --git a/qa/rpc-tests/smartfees.py b/qa/rpc-tests/smartfees.py index bfd11cd06e74c..d753b86011736 100755 --- a/qa/rpc-tests/smartfees.py +++ b/qa/rpc-tests/smartfees.py @@ -7,35 +7,21 @@ from collections import OrderedDict from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * +from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE +from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, FromHex, COIN # Construct 2 trivial P2SH's and the ScriptSigs that spend them # So we can create many many transactions without needing to spend # time signing. -P2SH_1 = "8kctg1WWKdoLveifyNnDYtRAqBPpqgL8z2" # P2SH of "OP_1 OP_DROP" -P2SH_2 = "8xp4fcNB8rz9UbZC47tv6eui1ZSPMd3iYT" # P2SH of "OP_2 OP_DROP" +redeem_script_1 = CScript([OP_1, OP_DROP]) +redeem_script_2 = CScript([OP_2, OP_DROP]) +P2SH_1 = CScript([OP_HASH160, hash160(redeem_script_1), OP_EQUAL]) +P2SH_2 = CScript([OP_HASH160, hash160(redeem_script_2), OP_EQUAL]) + # Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2 -# 4 bytes of OP_TRUE and push 2-byte redeem script of "OP_1 OP_DROP" or "OP_2 OP_DROP" -SCRIPT_SIG = ["0451025175", "0451025275"] - -class DecimalEncoder(json.JSONEncoder): - def default(self, o): - if isinstance(o, Decimal): - return float(o) - return super(DecimalEncoder, self).default(o) - -def swap_outputs_in_rawtx(rawtx, outputs, inputnum): - ''' - Since dictionaries in python are unsorted make sure that our outputs are correctly ordered. - Note: comparing strings to get "correct order" is based on the fact that - P2SH_1 string is < P2SH_2 string in this particular case. 
- ''' - outputs_unordered = json.dumps(outputs, cls=DecimalEncoder) - outputs_ordered = json.dumps(outputs, sort_keys=True, cls=DecimalEncoder) - if outputs_ordered != outputs_unordered: # nope, we need to do some work here - first_rawoutput = rawtx[12+82*inputnum:12+82*inputnum+64] - second_rawoutput = rawtx[12+82*inputnum+64:12+82*inputnum+64+64] - rawtx = rawtx[0:12+82*inputnum] + second_rawoutput + first_rawoutput + rawtx[12+82*inputnum+64+64:] - return rawtx +SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])] + +global log def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment): """ @@ -53,40 +39,30 @@ def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee rand_fee = float(fee_increment)*(1.1892**random.randint(0,28)) # Total fee ranges from min_fee to min_fee + 127*fee_increment fee = min_fee - fee_increment + satoshi_round(rand_fee) - inputs = [] + tx = CTransaction() total_in = Decimal("0.00000000") while total_in <= (amount + fee) and len(conflist) > 0: t = conflist.pop(0) total_in += t["amount"] - inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} ) + tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b"")) if total_in <= amount + fee: while total_in <= (amount + fee) and len(unconflist) > 0: t = unconflist.pop(0) total_in += t["amount"] - inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} ) + tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b"")) if total_in <= amount + fee: raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in)) - outputs = {} - outputs = OrderedDict([(P2SH_1, total_in - amount - fee), - (P2SH_2, amount)]) - rawtx = from_node.createrawtransaction(inputs, outputs) - rawtx = swap_outputs_in_rawtx(rawtx, outputs, len(inputs)) - # createrawtransaction constructs a transaction that is ready to be signed. - # These transactions don't need to be signed, but we still have to insert the ScriptSig - # that will satisfy the ScriptPubKey. - completetx = rawtx[0:10] - inputnum = 0 - for inp in inputs: - completetx += rawtx[10+82*inputnum:82+82*inputnum] - completetx += SCRIPT_SIG[inp["vout"]] - completetx += rawtx[84+82*inputnum:92+82*inputnum] - inputnum += 1 - completetx += rawtx[10+82*inputnum:] - txid = from_node.sendrawtransaction(completetx, True) + tx.vout.append(CTxOut(int((total_in - amount - fee)*COIN), P2SH_1)) + tx.vout.append(CTxOut(int(amount*COIN), P2SH_2)) + # These transactions don't need to be signed, but we still have to insert + # the ScriptSig that will satisfy the ScriptPubKey. + for inp in tx.vin: + inp.scriptSig = SCRIPT_SIG[inp.prevout.n] + txid = from_node.sendrawtransaction(ToHex(tx), True) unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee}) unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount}) - return (completetx, fee) + return (ToHex(tx), fee) def split_inputs(from_node, txins, txouts, initial_split = False): """ @@ -96,19 +72,21 @@ def split_inputs(from_node, txins, txouts, initial_split = False): which splits the value into 2 outputs which are appended to txouts. 
""" prevtxout = txins.pop() - inputs = [] - inputs.append({ "txid" : prevtxout["txid"], "vout" : prevtxout["vout"] }) + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b"")) + half_change = satoshi_round(prevtxout["amount"]/2) - rem_change = prevtxout["amount"] - half_change - Decimal("0.00010000") - outputs = OrderedDict([(P2SH_1, half_change), (P2SH_2, rem_change)]) - rawtx = from_node.createrawtransaction(inputs, outputs) - rawtx = swap_outputs_in_rawtx(rawtx, outputs, len(inputs)) + rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000") + tx.vout.append(CTxOut(int(half_change*COIN), P2SH_1)) + tx.vout.append(CTxOut(int(rem_change*COIN), P2SH_2)) + # If this is the initial split we actually need to sign the transaction - # Otherwise we just need to insert the property ScriptSig + # Otherwise we just need to insert the proper ScriptSig if (initial_split) : - completetx = from_node.signrawtransaction(rawtx)["hex"] + completetx = from_node.signrawtransaction(ToHex(tx))["hex"] else : - completetx = rawtx[0:82] + SCRIPT_SIG[prevtxout["vout"]] + rawtx[84:] + tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]] + completetx = ToHex(tx) txid = from_node.sendrawtransaction(completetx, True) txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change}) txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change}) @@ -120,7 +98,7 @@ def check_estimates(node, fees_seen, max_invalid, print_estimates = True): """ all_estimates = [ node.estimatefee(i) for i in range(1,26) ] if print_estimates: - print([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]]) + log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]]) delta = 1.0e-6 # account for rounding error last_e = max(fees_seen) for e in [x for x in all_estimates if x >= 0]: @@ -180,8 +158,8 @@ def setup_network(self): self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-whitelist=127.0.0.1"])) - print("This test is time consuming, please be patient") - print("Splitting inputs to small size so we can generate low priority tx's") + self.log.info("This test is time consuming, please be patient") + self.log.info("Splitting inputs to small size so we can generate low priority tx's") self.txouts = [] self.txouts2 = [] # Split a coinbase into two transaction puzzle outputs @@ -206,7 +184,7 @@ def setup_network(self): while (len(self.nodes[0].getrawmempool()) > 0): self.nodes[0].generate(1) reps += 1 - print("Finished splitting") + self.log.info("Finished splitting") # Now we can connect the other nodes, didn't want to connect them earlier # so the estimates would not be affected by the splitting transactions @@ -216,7 +194,7 @@ def setup_network(self): # (17k is room enough for 110 or so transactions) self.nodes.append(start_node(1, self.options.tmpdir, ["-blockprioritysize=1500", "-blockmaxsize=17000", - "-maxorphantx=1000", "-debug=estimatefee"])) + "-maxorphantx=1000"])) connect_nodes(self.nodes[1], 0) # Node2 is a stingy miner, that @@ -257,18 +235,21 @@ def transact_and_mine(self, numblocks, mining_node): self.memutxo = newmem def run_test(self): + # Make log handler available to helper functions + global log + log = self.log self.fees_per_kb = [] self.memutxo = [] self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting - print("Will output estimates for 1/2/3/6/15/25 blocks") + self.log.info("Will output estimates for 1/2/3/6/15/25 blocks") for i in range(2): - print("Creating transactions and mining them with a block 
@@ -120,7 +98,7 @@ def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
     """
     all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
     if print_estimates:
-        print([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
+        log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
     delta = 1.0e-6 # account for rounding error
     last_e = max(fees_seen)
     for e in [x for x in all_estimates if x >= 0]:
@@ -180,8 +158,8 @@ def setup_network(self):
         self.nodes.append(start_node(0, self.options.tmpdir,
                                      ["-maxorphantx=1000", "-whitelist=127.0.0.1"]))
-        print("This test is time consuming, please be patient")
-        print("Splitting inputs to small size so we can generate low priority tx's")
+        self.log.info("This test is time consuming, please be patient")
+        self.log.info("Splitting inputs to small size so we can generate low priority tx's")
         self.txouts = []
         self.txouts2 = []
         # Split a coinbase into two transaction puzzle outputs
@@ -206,7 +184,7 @@ def setup_network(self):
         while (len(self.nodes[0].getrawmempool()) > 0):
             self.nodes[0].generate(1)
             reps += 1
-        print("Finished splitting")
+        self.log.info("Finished splitting")

         # Now we can connect the other nodes, didn't want to connect them earlier
         # so the estimates would not be affected by the splitting transactions
@@ -216,7 +194,7 @@ def setup_network(self):
         # (17k is room enough for 110 or so transactions)
         self.nodes.append(start_node(1, self.options.tmpdir,
                                      ["-blockprioritysize=1500", "-blockmaxsize=17000",
-                                      "-maxorphantx=1000", "-debug=estimatefee"]))
+                                      "-maxorphantx=1000"]))
         connect_nodes(self.nodes[1], 0)

         # Node2 is a stingy miner, that
@@ -257,18 +235,21 @@ def transact_and_mine(self, numblocks, mining_node):
         self.memutxo = newmem

     def run_test(self):
+        # Make log handler available to helper functions
+        global log
+        log = self.log
         self.fees_per_kb = []
         self.memutxo = []
         self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
-        print("Will output estimates for 1/2/3/6/15/25 blocks")
+        self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")

         for i in range(2):
-            print("Creating transactions and mining them with a block size that can't keep up")
+            self.log.info("Creating transactions and mining them with a block size that can't keep up")
             # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
             self.transact_and_mine(10, self.nodes[2])
             check_estimates(self.nodes[1], self.fees_per_kb, 14)

-            print("Creating transactions and mining them at a block size that is just big enough")
+            self.log.info("Creating transactions and mining them at a block size that is just big enough")
             # Generate transactions while mining 10 more blocks, this time with node1
             # which mines blocks with capacity just above the rate that transactions are being created
             self.transact_and_mine(10, self.nodes[1])
@@ -279,7 +260,7 @@ def run_test(self):
             self.nodes[1].generate(1)
             sync_blocks(self.nodes[0:3], wait=.1)

-        print("Final estimates after emptying mempools")
+        self.log.info("Final estimates after emptying mempools")
         check_estimates(self.nodes[1], self.fees_per_kb, 2)

 if __name__ == '__main__':
diff --git a/qa/rpc-tests/spentindex.py b/qa/rpc-tests/spentindex.py
index 8de1b3ca1fa4a..d01624a838444 100755
--- a/qa/rpc-tests/spentindex.py
+++ b/qa/rpc-tests/spentindex.py
@@ -23,11 +23,11 @@ def __init__(self):
     def setup_network(self):
         self.nodes = []
         # Nodes 0/1 are "wallet" nodes
-        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
-        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-spentindex"]))
+        self.nodes.append(start_node(0, self.options.tmpdir))
+        self.nodes.append(start_node(1, self.options.tmpdir, ["-spentindex"]))
         # Nodes 2/3 are used for testing
-        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-spentindex"]))
-        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-spentindex", "-txindex"]))
+        self.nodes.append(start_node(2, self.options.tmpdir, ["-spentindex"]))
+        self.nodes.append(start_node(3, self.options.tmpdir, ["-spentindex", "-txindex"]))
         connect_nodes(self.nodes[0], 1)
         connect_nodes(self.nodes[0], 2)
         connect_nodes(self.nodes[0], 3)
@@ -36,7 +36,7 @@ def setup_network(self):
         self.sync_all()

     def run_test(self):
-        print("Mining blocks...")
+        self.log.info("Mining blocks...")
         self.nodes[0].generate(105)
         self.sync_all()

         chain_height = self.nodes[1].getblockcount()
         assert_equal(chain_height, 105)

         # Check that
-        print("Testing spent index...")
+        self.log.info("Testing spent index...")

         privkey = "cU4zhap7nPJAWeMFu4j6jLrfPmqakDAzy8zn8Fhb3oEevdm4e5Lc"
         address = "yeMpGzMj3rhtnz48XsfpB8itPHhHtgxLc3"
@@ -62,7 +62,7 @@ def run_test(self):
         self.nodes[0].generate(1)
         self.sync_all()

-        print("Testing getspentinfo method...")
+        self.log.info("Testing getspentinfo method...")

         # Check that the spentinfo works standalone
         info = self.nodes[1].getspentinfo({"txid": unspent[0]["txid"], "index": unspent[0]["vout"]})
@@ -70,7 +70,7 @@ def run_test(self):
         assert_equal(info["index"], 0)
         assert_equal(info["height"], 106)

-        print("Testing getrawtransaction method...")
+        self.log.info("Testing getrawtransaction method...")

         # Check that verbose raw transaction includes spent info
         txVerbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1)
@@ -112,7 +112,7 @@ def run_test(self):
         assert_equal(txVerbose4["vin"][0]["value"], Decimal(unspent[0]["amount"]))
         assert_equal(txVerbose4["vin"][0]["valueSat"], amount)

-        print("Passed\n")
+        self.log.info("Passed")

 if __name__ == '__main__':
diff --git a/qa/rpc-tests/sporks.py b/qa/rpc-tests/sporks.py
index f556468df08e3..77d705cd0dffb 100755
--- a/qa/rpc-tests/sporks.py
+++ b/qa/rpc-tests/sporks.py
@@ -22,11 +22,9 @@ def setup_network(self):
         disable_mocktime()
         self.nodes = []
         self.nodes.append(start_node(0, self.options.tmpdir,
-                                     ["-debug", "-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]))
-        self.nodes.append(start_node(1, self.options.tmpdir,
-                                     ["-debug"]))
-        self.nodes.append(start_node(2, self.options.tmpdir,
-                                     ["-debug"]))
+                                     ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]))
+        self.nodes.append(start_node(1, self.options.tmpdir))
+        self.nodes.append(start_node(2, self.options.tmpdir))

         # connect only 2 first nodes at start
         connect_nodes(self.nodes[0], 1)
@@ -65,8 +63,8 @@ def run_test(self):
         # restart nodes to check spork persistence
         stop_node(self.nodes[0], 0)
         stop_node(self.nodes[1], 1)
-        self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug"])
-        self.nodes[1] = start_node(1, self.options.tmpdir, ["-debug"])
+        self.nodes[0] = start_node(0, self.options.tmpdir)
+        self.nodes[1] = start_node(1, self.options.tmpdir)
         assert(not self.get_test_spork_state(self.nodes[0]))
         assert(not self.get_test_spork_state(self.nodes[1]))

diff --git a/qa/rpc-tests/test_framework/blockstore.py b/qa/rpc-tests/test_framework/blockstore.py
index 5280d18cdc606..4cfd682bb58c1 100644
--- a/qa/rpc-tests/test_framework/blockstore.py
+++ b/qa/rpc-tests/test_framework/blockstore.py
@@ -8,6 +8,9 @@
 from io import BytesIO
 import dbm.dumb as dbmd
+import logging
+logger = logging.getLogger("TestFramework.blockstore")
+
 class BlockStore(object):
     """BlockStore helper class.
@@ -86,7 +88,7 @@ def add_block(self, block):
         try:
             self.blockDB[repr(block.sha256)] = bytes(block.serialize())
         except TypeError as e:
-            print("Unexpected error: ", sys.exc_info()[0], e.args)
+            logger.exception("Unexpected error")
         self.currentBlock = block.sha256
         self.headers_map[block.sha256] = CBlockHeader(block)
@@ -156,7 +158,7 @@ def add_transaction(self, tx):
         try:
             self.txDB[repr(tx.sha256)] = bytes(tx.serialize())
         except TypeError as e:
-            print("Unexpected error: ", sys.exc_info()[0], e.args)
+            logger.exception("Unexpected error")

     def get_transactions(self, inv):
         responses = []
diff --git a/qa/rpc-tests/test_framework/comptool.py b/qa/rpc-tests/test_framework/comptool.py
index 5d4824ac4ac19..669a5c73afcc8 100755
--- a/qa/rpc-tests/test_framework/comptool.py
+++ b/qa/rpc-tests/test_framework/comptool.py
@@ -21,6 +21,10 @@
 from .blockstore import BlockStore, TxStore
 from .util import p2p_port

+import logging
+
+logger = logging.getLogger("TestFramework.comptool")
+
 global mininode_lock

 class RejectResult(object):
@@ -209,7 +213,6 @@ def blocks_requested():

         # --> error if not requested
         if not wait_until(blocks_requested, attempts=20*num_blocks, sleep=0.1):
-            # print [ c.cb.block_request_map for c in self.connections ]
             raise AssertionError("Not all nodes requested block")

         # Send getheaders message
@@ -231,7 +234,6 @@ def transaction_requested():

         # --> error if not requested
         if not wait_until(transaction_requested, attempts=20*num_events):
-            # print [ c.cb.tx_request_map for c in self.connections ]
             raise AssertionError("Not all nodes requested transaction")

         # Get the mempool
@@ -258,13 +260,12 @@ def check_results(self, blockhash, outcome):
                 if c.cb.bestblockhash == blockhash:
                     return False
                 if blockhash not in c.cb.block_reject_map:
-                    print('Block not in reject map: %064x' % (blockhash))
+                    logger.error('Block not in reject map: %064x' % (blockhash))
                     return False
                 if not outcome.match(c.cb.block_reject_map[blockhash]):
-                    print('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
+
                    logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
                     return False
             elif ((c.cb.bestblockhash == blockhash) != outcome):
-                # print c.cb.bestblockhash, blockhash, outcome
                 return False
         return True
@@ -280,19 +281,17 @@ def check_mempool(self, txhash, outcome):
             if outcome is None:
                 # Make sure the mempools agree with each other
                 if c.cb.lastInv != self.connections[0].cb.lastInv:
-                    # print c.rpc.getrawmempool()
                     return False
             elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
                 if txhash in c.cb.lastInv:
                     return False
                 if txhash not in c.cb.tx_reject_map:
-                    print('Tx not in reject map: %064x' % (txhash))
+                    logger.error('Tx not in reject map: %064x' % (txhash))
                     return False
                 if not outcome.match(c.cb.tx_reject_map[txhash]):
-                    print('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
+                    logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
                     return False
             elif ((txhash in c.cb.lastInv) != outcome):
-                # print c.rpc.getrawmempool(), c.cb.lastInv
                 return False
         return True
@@ -403,7 +402,7 @@ def run(self):
                 if (not self.check_mempool(tx.sha256, tx_outcome)):
                     raise AssertionError("Mempool test failed at test %d" % test_number)

-            print("Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ])
+            logger.info("Test %d: PASS" % test_number)
             test_number += 1

         [ c.disconnect_node() for c in self.connections ]
diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py
index 182a45e16c63b..8e5962345bbc6 100755
--- a/qa/rpc-tests/test_framework/mininode.py
+++ b/qa/rpc-tests/test_framework/mininode.py
@@ -52,6 +52,8 @@
 NODE_GETUTXO = (1 << 1)
 NODE_BLOOM = (1 << 2)

+logger = logging.getLogger("TestFramework.mininode")
+
 # Keep our own socket map for asyncore, so that we can track disconnects
 # ourselves (to workaround an issue with closing an asyncore socket when
 # using select)
@@ -1352,8 +1354,7 @@ def deliver(self, conn, message):
             try:
                 getattr(self, 'on_' + message.command.decode('ascii'))(conn, message)
             except:
-                print("ERROR delivering %s (%s)" % (repr(message),
-                      sys.exc_info()[0]))
+                logger.exception("ERROR delivering %s" % repr(message))

     def on_version(self, conn, message):
         if message.nVersion >= 209:
@@ -1464,7 +1465,6 @@ class NodeConn(asyncore.dispatcher):
     def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
         asyncore.dispatcher.__init__(self, map=mininode_socket_map)
-        self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
         self.dstaddr = dstaddr
         self.dstport = dstport
         self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
@@ -1489,8 +1489,7 @@ def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
             vt.addrFrom.port = 0
             self.send_message(vt, True)

-        print('MiniNode: Connecting to Dash Node IP # ' + dstaddr + ':' \
-            + str(dstport))
+        logger.info('Connecting to Dash Node: %s:%d' % (self.dstaddr, self.dstport))

         try:
             self.connect((dstaddr, dstport))
@@ -1498,18 +1497,14 @@ def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
             self.handle_close()
         self.rpc = rpc

-    def show_debug_msg(self, msg):
-        self.log.debug(msg)
-
     def handle_connect(self):
         if self.state != "connected":
-            self.show_debug_msg("MiniNode: Connected & Listening: \n")
+            logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
             self.state = "connected"
            self.cb.on_open(self)

     def handle_close(self):
-        self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
-                            % (self.dstaddr, self.dstport))
+        logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
         self.state = "closed"
         self.recvbuf = b""
         self.sendbuf = b""
@@ -1591,17 +1586,14 @@ def got_data(self):
                     t.deserialize(f)
                     self.got_message(t)
                 else:
-                    self.show_debug_msg("Unknown command: '" + str(command) + "' " +
-                                        repr(msg))
+                    logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, str(command), repr(msg)))
         except Exception as e:
-            print('got_data:', repr(e))
-            # import traceback
-            # traceback.print_tb(sys.exc_info()[2])
+            logger.exception('got_data: %s' % repr(e))

     def send_message(self, message, pushbuf=False):
         if self.state != "connected" and not pushbuf:
             raise IOError('Not connected, no pushbuf')
-        self.show_debug_msg("Send %s" % repr(message))
+        logger.debug("Send message to %s:%d: %s" % (self.dstaddr, self.dstport, repr(message)))
         command = message.command
         data = message.serialize()
         tmsg = self.MAGIC_BYTES[self.network]
@@ -1623,7 +1615,7 @@ def got_message(self, message):
             self.messagemap[b'ping'] = msg_ping_prebip31
         if self.last_sent + 30 * 60 < time.time():
             self.send_message(self.messagemap[b'ping']())
-        self.show_debug_msg("Recv %s" % repr(message))
+        logger.debug("Received message from %s:%d: %s" % (self.dstaddr, self.dstport, repr(message)))
         self.cb.deliver(self, message)

     def disconnect_node(self):
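Note that mininode now logs through a single module-level logger instead of one logger per NodeConn; the connection endpoint moves into the message text. Because Python loggers form a dot-separated hierarchy, every "TestFramework.*" child automatically propagates its records to whatever handlers the framework attaches to the parent. A minimal illustration of the pattern, with the handler wiring shown inline just for the example:

    import logging
    import sys

    # one shared parent, configured once by the test framework
    parent = logging.getLogger("TestFramework")
    parent.setLevel(logging.DEBUG)
    parent.addHandler(logging.StreamHandler(sys.stdout))

    # each helper module declares its own child logger at import time
    logger = logging.getLogger("TestFramework.mininode")

    # records propagate to the parent's handler; no per-connection logger needed
    logger.debug("Connecting to Dash Node: %s:%d", "127.0.0.1", 19994)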
diff --git a/qa/rpc-tests/test_framework/test_framework.py b/qa/rpc-tests/test_framework/test_framework.py
index e0d5fbdd84942..0214c64a0fd5f 100755
--- a/qa/rpc-tests/test_framework/test_framework.py
+++ b/qa/rpc-tests/test_framework/test_framework.py
@@ -55,7 +55,7 @@ def add_options(self, parser):
         pass

     def setup_chain(self):
-        print("Initializing test directory "+self.options.tmpdir)
+        self.log.info("Initializing test directory "+self.options.tmpdir)
         if self.setup_clean_chain:
             initialize_chain_clean(self.options.tmpdir, self.num_nodes)
             set_genesis_mocktime()
@@ -127,6 +127,8 @@ def main(self):
                           help="Directory for caching pregenerated datadirs")
         parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                           help="Root directory for datadirs")
+        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
+                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
         parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                           help="Print out all RPC calls as they are made")
         parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
@@ -139,9 +141,6 @@ def main(self):
         # backup dir variable for removal at cleanup
         self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)

-        if self.options.trace_rpc:
-            logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
-
         if self.options.coveragedir:
             enable_coverage(self.options.coveragedir)
@@ -151,46 +150,45 @@ def main(self):

         check_json_precision()

+        # Set up temp directory and start logging
+        os.makedirs(self.options.tmpdir, exist_ok=False)
+        self._start_logging()
+
         success = False
+
         try:
-            os.makedirs(self.options.tmpdir, exist_ok=False)
             self.setup_chain()
             self.setup_network()
             self.run_test()
             success = True
         except JSONRPCException as e:
-            print("JSONRPC error: "+e.error['message'])
-            traceback.print_tb(sys.exc_info()[2])
+            self.log.exception("JSONRPC error")
         except AssertionError as e:
-            print("Assertion failed: " + str(e))
-            traceback.print_tb(sys.exc_info()[2])
+            self.log.exception("Assertion failed")
         except KeyError as e:
-            print("key not found: "+ str(e))
-            traceback.print_tb(sys.exc_info()[2])
+            self.log.exception("Key error")
         except Exception as e:
-            print("Unexpected exception caught during testing: " + repr(e))
-            traceback.print_tb(sys.exc_info()[2])
+            self.log.exception("Unexpected exception caught during testing")
         except KeyboardInterrupt as e:
-            print("Exiting after " + repr(e))
+            self.log.warning("Exiting after keyboard interrupt")

         if not self.options.noshutdown:
-            print("Stopping nodes")
+            self.log.info("Stopping nodes")
             try:
                 stop_nodes(self.nodes)
             except BaseException as e:
                 success = False
-                print("Unexpected exception caught during shutdown: " + repr(e))
-                traceback.print_tb(sys.exc_info()[2])
+                self.log.exception("Unexpected exception caught during shutdown")
         else:
-            print("Note: dashds were not stopped and may still be running")
+            self.log.info("Note: dashds were not stopped and may still be running")

         if not self.options.nocleanup and not self.options.noshutdown and success:
-            print("Cleaning up")
+            self.log.info("Cleaning up")
             shutil.rmtree(self.options.tmpdir)
             if not os.listdir(self.options.root):
                 os.rmdir(self.options.root)
         else:
-            print("Not cleaning up dir %s" % self.options.tmpdir)
+            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
             if os.getenv("PYTHON_DEBUG", ""):
                 # Dump the end of the debug logs, to aid in debugging rare
                 # travis failures.
@@ -202,12 +200,40 @@ def main(self):
                 from collections import deque
                 print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
         if success:
-            print("Tests successful")
+            self.log.info("Tests successful")
             sys.exit(0)
         else:
-            print("Failed")
+            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
+            logging.shutdown()
             sys.exit(1)

+    def _start_logging(self):
+        # Add logger and logging handlers
+        self.log = logging.getLogger('TestFramework')
+        self.log.setLevel(logging.DEBUG)
+        # Create file handler to log all messages
+        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
+        fh.setLevel(logging.DEBUG)
+        # Create console handler to log messages to stdout. By default this logs messages
+        # at INFO level and above, but the level can be configured with --loglevel.
+        ch = logging.StreamHandler(sys.stdout)
+        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
+        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
+        ch.setLevel(ll)
+        # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
+        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
+        fh.setFormatter(formatter)
+        ch.setFormatter(formatter)
+        # add the handlers to the logger
+        self.log.addHandler(fh)
+        self.log.addHandler(ch)
+
+        if self.options.trace_rpc:
+            rpc_logger = logging.getLogger("BitcoinRPC")
+            rpc_logger.setLevel(logging.DEBUG)
+            rpc_handler = logging.StreamHandler(sys.stdout)
+            rpc_handler.setLevel(logging.DEBUG)
+            rpc_logger.addHandler(rpc_handler)
+

 MASTERNODE_COLLATERAL = 1000
@@ -580,6 +606,6 @@ def add_options(self, parser):
     def setup_network(self):
         self.nodes = start_nodes(
             self.num_nodes, self.options.tmpdir,
-            extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
+            extra_args=[['-whitelist=127.0.0.1']] * self.num_nodes,
             binary=[self.options.testbinary] + [self.options.refbinary]*(self.num_nodes-1))
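For reference, the two-handler arrangement that _start_logging builds reduces to a few lines. This standalone sketch uses the logger name and formatter string from the diff above; the file path and loglevel value are illustrative stand-ins for self.options:

    import logging
    import sys

    log = logging.getLogger('TestFramework')
    log.setLevel(logging.DEBUG)                 # the logger passes everything; handlers filter

    fh = logging.FileHandler('/tmp/test_framework.log')  # illustrative path
    fh.setLevel(logging.DEBUG)                  # the file always captures all records

    ch = logging.StreamHandler(sys.stdout)
    loglevel = 'INFO'                           # stand-in for self.options.loglevel
    ch.setLevel(int(loglevel) if loglevel.isdigit() else loglevel.upper())

    formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    log.addHandler(fh)
    log.addHandler(ch)

    log.debug("written to the file only")
    log.info("written to the file and the console")

Because the console handler's level is the only knob, --loglevel DEBUG shows everything on screen while the file log is complete either way, which is what lets failed-run logs be inspected after the fact.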
diff --git a/qa/rpc-tests/test_framework/util.py b/qa/rpc-tests/test_framework/util.py
index c2862827961f7..079f924fad785 100644
--- a/qa/rpc-tests/test_framework/util.py
+++ b/qa/rpc-tests/test_framework/util.py
@@ -20,12 +20,15 @@
 import time
 import re
 import errno
+import logging

 from . import coverage
 from .authproxy import AuthServiceProxy, JSONRPCException

 COVERAGE_DIR = None

+logger = logging.getLogger("TestFramework.utils")
+
 # The maximum number of nodes a single test can spawn
 MAX_NODES = 15
 # Don't assign rpc or p2p ports lower than this
@@ -261,6 +264,7 @@ def initialize_chain(test_dir, num_nodes, cachedir, extra_args=None, redirect_stderr=False):
             break

     if create_cache:
+        logger.debug("Creating data directories from cached datadir")

         #find and delete old cache directories if any exist
         for i in range(MAX_NODES):
@@ -280,11 +284,9 @@
             if redirect_stderr:
                 stderr = sys.stdout
             bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
-            if os.getenv("PYTHON_DEBUG", ""):
-                print("initialize_chain: dashd started, waiting for RPC to come up")
+            logger.debug("initialize_chain: dashd started, waiting for RPC to come up")
             wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
-            if os.getenv("PYTHON_DEBUG", ""):
-                print("initialize_chain: RPC successfully started")
+            logger.debug("initialize_chain: RPC successfully started")

         rpcs = []
         for i in range(MAX_NODES):
@@ -341,7 +343,7 @@
     if binary is None:
         binary = os.getenv("BITCOIND", "dashd")
     # RPC tests still depend on free transactions
-    args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-mocktime="+str(get_mocktime()) ]
+    args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-logtimemicros", "-debug", "-mocktime="+str(get_mocktime()) ]
     # Don't try auto backups (they fail a lot when running tests)
     args += [ "-createwalletbackups=0" ]
     if extra_args is not None: args.extend(extra_args)
@@ -352,12 +354,10 @@
        stderr = sys.stdout
     bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
-    if os.getenv("PYTHON_DEBUG", ""):
-        print("start_node: dashd started, waiting for RPC to come up")
+    logger.debug("start_node: dashd started, waiting for RPC to come up")
     url = rpc_url(i, rpchost)
     wait_for_bitcoind_start(bitcoind_processes[i], url, i)
-    if os.getenv("PYTHON_DEBUG", ""):
-        print("start_node: RPC successfully started")
+    logger.debug("start_node: RPC successfully started")
     proxy = get_rpc_proxy(url, i, timeout=timewait)

     if COVERAGE_DIR:
@@ -423,10 +423,11 @@ def wait_node(i):
     del bitcoind_processes[i]

 def stop_node(node, i, wait=True):
+    logger.debug("Stopping node %d" % i)
     try:
         node.stop()
     except http.client.CannotSendRequest as e:
-        print("WARN: Unable to stop node: " + repr(e))
+        logger.exception("Unable to stop node")
     if wait:
         wait_node(i)
blocks...") + self.log.info("Mining blocks...") self.nodes[0].generate(105) self.sync_all() chain_height = self.nodes[1].getblockcount() assert_equal(chain_height, 105) - print("Testing transaction index...") + self.log.info("Testing transaction index...") privkey = "cU4zhap7nPJAWeMFu4j6jLrfPmqakDAzy8zn8Fhb3oEevdm4e5Lc" address = "yeMpGzMj3rhtnz48XsfpB8itPHhHtgxLc3" @@ -66,7 +66,7 @@ def run_test(self): assert_equal(verbose["vout"][0]["valueSat"], 5000000000); assert_equal(verbose["vout"][0]["value"], 50); - print("Passed\n") + self.log.info("Passed") if __name__ == '__main__': diff --git a/qa/rpc-tests/wallet-hd.py b/qa/rpc-tests/wallet-hd.py index 5a4596320e815..0df5335294aec 100755 --- a/qa/rpc-tests/wallet-hd.py +++ b/qa/rpc-tests/wallet-hd.py @@ -71,7 +71,7 @@ def run_test (self): self.sync_all() assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1) - print("Restore backup ...") + self.log.info("Restore backup ...") stop_node(self.nodes[1],1) os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat") shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat") diff --git a/qa/rpc-tests/wallet.py b/qa/rpc-tests/wallet.py index fa70171fcf176..cb34cafe77a3c 100755 --- a/qa/rpc-tests/wallet.py +++ b/qa/rpc-tests/wallet.py @@ -35,7 +35,7 @@ def run_test (self): assert_equal(len(self.nodes[1].listunspent()), 0) assert_equal(len(self.nodes[2].listunspent()), 0) - print("Mining blocks...") + self.log.info("Mining blocks...") self.nodes[0].generate(1) @@ -333,7 +333,7 @@ def run_test (self): ] chainlimit = 6 for m in maintenance: - print("check " + m) + self.log.info("check " + m) stop_nodes(self.nodes) # set lower ancestor limit for later self.nodes = start_nodes(3, self.options.tmpdir, [[m, "-limitancestorcount="+str(chainlimit)]] * 3) diff --git a/qa/rpc-tests/walletbackup.py b/qa/rpc-tests/walletbackup.py index dd212cffbd81b..65331a707e629 100755 --- a/qa/rpc-tests/walletbackup.py +++ b/qa/rpc-tests/walletbackup.py @@ -34,8 +34,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from random import randint -import logging -logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout) class WalletBackupTest(BitcoinTestFramework): @@ -100,7 +98,7 @@ def erase_three(self): os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat") def run_test(self): - logging.info("Generating initial blockchain") + self.log.info("Generating initial blockchain") self.nodes[0].generate(1) sync_blocks(self.nodes) self.nodes[1].generate(1) @@ -115,12 +113,12 @@ def run_test(self): assert_equal(self.nodes[2].getbalance(), 500) assert_equal(self.nodes[3].getbalance(), 0) - logging.info("Creating transactions") + self.log.info("Creating transactions") # Five rounds of sending each other transactions. 
diff --git a/qa/rpc-tests/walletbackup.py b/qa/rpc-tests/walletbackup.py
index dd212cffbd81b..65331a707e629 100755
--- a/qa/rpc-tests/walletbackup.py
+++ b/qa/rpc-tests/walletbackup.py
@@ -34,8 +34,6 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import *
 from random import randint
-import logging
-logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)

 class WalletBackupTest(BitcoinTestFramework):
@@ -100,7 +98,7 @@ def erase_three(self):
         os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")

     def run_test(self):
-        logging.info("Generating initial blockchain")
+        self.log.info("Generating initial blockchain")
         self.nodes[0].generate(1)
         sync_blocks(self.nodes)
         self.nodes[1].generate(1)
@@ -115,12 +113,12 @@ def run_test(self):
         assert_equal(self.nodes[2].getbalance(), 500)
         assert_equal(self.nodes[3].getbalance(), 0)

-        logging.info("Creating transactions")
+        self.log.info("Creating transactions")
         # Five rounds of sending each other transactions.
         for i in range(5):
             self.do_one_round()

-        logging.info("Backing up")
+        self.log.info("Backing up")
         tmpdir = self.options.tmpdir
         self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
         self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
@@ -129,7 +127,7 @@ def run_test(self):
         self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
         self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")

-        logging.info("More transactions")
+        self.log.info("More transactions")
         for i in range(5):
             self.do_one_round()
@@ -150,7 +148,7 @@ def run_test(self):
         ##
         # Test restoring spender wallets from backups
         ##
-        logging.info("Restoring using wallet.dat")
+        self.log.info("Restoring using wallet.dat")
         self.stop_three()
         self.erase_three()
@@ -164,7 +162,7 @@ def run_test(self):
         shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
         shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")

-        logging.info("Re-starting nodes")
+        self.log.info("Re-starting nodes")
         self.start_three()
         sync_blocks(self.nodes)
@@ -172,7 +170,7 @@ def run_test(self):
         assert_equal(self.nodes[1].getbalance(), balance1)
         assert_equal(self.nodes[2].getbalance(), balance2)

-        logging.info("Restoring using dumped wallet")
+        self.log.info("Restoring using dumped wallet")
         self.stop_three()
         self.erase_three()
diff --git a/qa/rpc-tests/zapwallettxes.py b/qa/rpc-tests/zapwallettxes.py
index 49883bb958293..bcb201648f3be 100755
--- a/qa/rpc-tests/zapwallettxes.py
+++ b/qa/rpc-tests/zapwallettxes.py
@@ -32,7 +32,7 @@ def setup_network(self, split=False):
         self.sync_all()

     def run_test (self):
-        print("Mining blocks...")
+        self.log.info("Mining blocks...")
         self.nodes[0].generate(1)
         self.sync_all()
         self.nodes[1].generate(101)
diff --git a/qa/rpc-tests/zmq_test.py b/qa/rpc-tests/zmq_test.py
index 1e2f06bd54b58..e6f18b0b9332b 100755
--- a/qa/rpc-tests/zmq_test.py
+++ b/qa/rpc-tests/zmq_test.py
@@ -36,7 +36,7 @@ def run_test(self):
         genhashes = self.nodes[0].generate(1)
         self.sync_all()

-        print("listen...")
+        self.log.info("listen...")
         msg = self.zmqSubSocket.recv_multipart()
         topic = msg[0]
         assert_equal(topic, b"hashtx")
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index 60bbbadc79277..63185b5bc9299 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -29,6 +29,7 @@ bench_bench_dash_SOURCES = \
   bench/lockedpool.cpp \
   bench/perf.cpp \
   bench/perf.h \
+  bench/prevector_destructor.cpp \
   bench/string_cast.cpp

 nodist_bench_bench_dash_SOURCES = $(GENERATED_TEST_FILES)
diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp
index 9db6492e51dc4..e866afe407f72 100644
--- a/src/bench/coin_selection.cpp
+++ b/src/bench/coin_selection.cpp
@@ -20,7 +20,7 @@ static void addCoin(const CAmount& nValue, const CWallet& wallet, std::vector<COutput>& vCoins)
-    COutput output(wtx, nInput, nAge, true, true);
+    COutput output(wtx, nInput, nAge, true /* spendable */, true /* solvable */, true /* safe */);
diff --git a/src/bench/prevector_destructor.cpp b/src/bench/prevector_destructor.cpp
new file mode 100644
--- /dev/null
+++ b/src/bench/prevector_destructor.cpp
@@ -0,0 +1,32 @@
+#include "bench.h"
+#include "prevector.h"
+
+static void PrevectorDestructor(benchmark::State& state)
+{
+    while (state.KeepRunning()) {
+        for (auto x = 0; x < 1000; ++x) {
+            prevector<28, unsigned char> t0;
+            prevector<28, unsigned char> t1;
+            t0.resize(28);
+            t1.resize(29);
+        }
+    }
+}
+
+static void PrevectorClear(benchmark::State& state)
+{
+
+    while (state.KeepRunning()) {
+        for (auto x = 0; x < 1000; ++x) {
+            prevector<28, unsigned char> t0;
+            prevector<28, unsigned char> t1;
+            t0.resize(28);
+            t0.clear();
+            t1.resize(29);
+            t1.clear();
+        }
+    }
+}
+
+BENCHMARK(PrevectorDestructor);
+BENCHMARK(PrevectorClear);
diff --git a/src/bloom.cpp b/src/bloom.cpp
index edc41a6512087..d57b0df40c5c2 100644
--- a/src/bloom.cpp
+++ b/src/bloom.cpp
@@ -254,8 +254,8 @@ void CRollingBloomFilter::insert(const std::vector<unsigned char>& vKey)
     if (nGeneration == 4) {
         nGeneration = 1;
     }
-    uint64_t nGenerationMask1 = -(uint64_t)(nGeneration & 1);
-    uint64_t nGenerationMask2 = -(uint64_t)(nGeneration >> 1);
+    uint64_t nGenerationMask1 = 0 - (uint64_t)(nGeneration & 1);
+    uint64_t nGenerationMask2 = 0 - (uint64_t)(nGeneration >> 1);
     /* Wipe old entries that used this generation number. */
     for (uint32_t p = 0; p < data.size(); p += 2) {
         uint64_t p1 = data[p], p2 = data[p + 1];
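The new `0 - (uint64_t)x` spelling yields exactly the same all-zeros or all-ones masks as the old unary minus, since unsigned arithmetic wraps modulo 2^64 either way; the rewrite presumably just sidesteps compiler warnings about negating an unsigned operand. A quick sanity check of the three live generation values, sketched in Python with the 64-bit wrap-around emulated by hand (Python integers are unbounded):

    M64 = (1 << 64) - 1  # emulate uint64_t wrap-around

    def generation_masks(generation):
        # 0 - 1 wraps to all-ones, 0 - 0 stays all-zeros
        mask1 = (0 - (generation & 1)) & M64
        mask2 = (0 - (generation >> 1)) & M64
        return mask1, mask2

    assert generation_masks(1) == (M64, 0)
    assert generation_masks(2) == (0, M64)
    assert generation_masks(3) == (M64, M64)

The filter stores each entry's generation across two bit planes, so these masks let the wipe loop select exactly the entries tagged with the generation being recycled.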
diff --git a/src/pow.cpp b/src/pow.cpp
index d51025fdce7ff..7e55e3d39c517 100644
--- a/src/pow.cpp
+++ b/src/pow.cpp
@@ -146,12 +146,9 @@ unsigned int static DarkGravityWave(const CBlockIndex* pindexLast, const CBlockHeader *pblock, const Consensus::Params& params) {

 unsigned int GetNextWorkRequiredBTC(const CBlockIndex* pindexLast, const CBlockHeader *pblock, const Consensus::Params& params)
 {
+    assert(pindexLast != NULL);
     unsigned int nProofOfWorkLimit = UintToArith256(params.powLimit).GetCompact();

-    // Genesis block
-    if (pindexLast == NULL)
-        return nProofOfWorkLimit;
-
     // Only change once per interval
     if ((pindexLast->nHeight+1) % params.DifficultyAdjustmentInterval() != 0)
     {
diff --git a/src/prevector.h b/src/prevector.h
index 8b4fff4cd211d..717d42f92b687 100644
--- a/src/prevector.h
+++ b/src/prevector.h
@@ -11,6 +11,7 @@
 #include <stdint.h>
 #include <string.h>

+#include <type_traits>

 #pragma pack(push, 1)
 /** Implements a drop-in replacement for std::vector<T> which stores up to N
@@ -388,10 +389,14 @@ class prevector {
     iterator erase(iterator first, iterator last) {
         iterator p = first;
         char* endp = (char*)&(*end());
-        while (p != last) {
-            (*p).~T();
-            _size--;
-            ++p;
+        if (!std::is_trivially_destructible<T>::value) {
+            while (p != last) {
+                (*p).~T();
+                _size--;
+                ++p;
+            }
+        } else {
+            _size -= last - p;
         }
         memmove(&(*first), &(*last), endp - ((char*)(&(*last))));
         return first;
@@ -432,7 +437,9 @@ class prevector {
     }

     ~prevector() {
-        clear();
+        if (!std::is_trivially_destructible<T>::value) {
+            clear();
+        }
         if (!is_direct()) {
             free(_union.indirect);
             _union.indirect = NULL;
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index 487bcca3caec0..a5f18026ebf26 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -676,7 +676,7 @@ void WalletModel::getOutputs(const std::vector<COutPoint>& vOutpoints, std::vector<COutput>& vOutputs)
         if (!wallet->mapWallet.count(outpoint.hash)) continue;
         int nDepth = wallet->mapWallet[outpoint.hash].GetDepthInMainChain();
         if (nDepth < 0) continue;
-        COutput out(&wallet->mapWallet[outpoint.hash], outpoint.n, nDepth, true, true);
+        COutput out(&wallet->mapWallet[outpoint.hash], outpoint.n, nDepth, true /* spendable */, true /* solvable */, true /* safe */);
         vOutputs.push_back(out);
     }
 }
@@ -703,7 +703,7 @@ void WalletModel::listCoins(std::map<QString, std::vector<COutput> >& mapCoins)
         if (!wallet->mapWallet.count(outpoint.hash)) continue;
         int nDepth = wallet->mapWallet[outpoint.hash].GetDepthInMainChain();
         if (nDepth < 0) continue;
-        COutput out(&wallet->mapWallet[outpoint.hash], outpoint.n, nDepth, true, true);
+        COutput out(&wallet->mapWallet[outpoint.hash], outpoint.n, nDepth, true /* spendable */, true /* solvable */, true /* safe */);
         if (outpoint.n < out.tx->tx->vout.size() && wallet->IsMine(out.tx->tx->vout[outpoint.n]) == ISMINE_SPENDABLE)
             vCoins.push_back(out);
     }
@@ -715,7 +715,7 @@ void WalletModel::listCoins(std::map<QString, std::vector<COutput> >& mapCoins)
         while (wallet->IsChange(cout.tx->tx->vout[cout.i]) && cout.tx->tx->vin.size() > 0 && wallet->IsMine(cout.tx->tx->vin[0]))
         {
             if (!wallet->mapWallet.count(cout.tx->tx->vin[0].prevout.hash)) break;
-            cout = COutput(&wallet->mapWallet[cout.tx->tx->vin[0].prevout.hash], cout.tx->tx->vin[0].prevout.n, 0, true, true);
+            cout = COutput(&wallet->mapWallet[cout.tx->tx->vin[0].prevout.hash], cout.tx->tx->vin[0].prevout.n, 0 /* depth */, true /* spendable */, true /* solvable */, true /* safe */);
         }

         CTxDestination address;
diff --git a/src/rpc/protocol.h b/src/rpc/protocol.h
index 90d7815be5f3b..f10a0e2813011 100644
--- a/src/rpc/protocol.h
+++ b/src/rpc/protocol.h
@@ -39,7 +39,7 @@ enum RPCErrorCode
     RPC_METHOD_NOT_FOUND = -32601,
     RPC_INVALID_PARAMS = -32602,
     // RPC_INTERNAL_ERROR should only be used for genuine errors in bitcoind
-    // (for exampled datadir corruption).
+    // (for example datadir corruption).
     RPC_INTERNAL_ERROR = -32603,
     RPC_PARSE_ERROR = -32700,
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 00dc73ac788fa..4ca9621e531e7 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -321,7 +321,7 @@ BOOST_AUTO_TEST_CASE(test_ParseInt32)
     BOOST_CHECK(ParseInt32("1234", &n) && n == 1234);
     BOOST_CHECK(ParseInt32("01234", &n) && n == 1234); // no octal
     BOOST_CHECK(ParseInt32("2147483647", &n) && n == 2147483647);
-    BOOST_CHECK(ParseInt32("-2147483648", &n) && n == -2147483648);
+    BOOST_CHECK(ParseInt32("-2147483648", &n) && n == (-2147483647 - 1)); // (-2147483647 - 1) equals INT_MIN
     BOOST_CHECK(ParseInt32("-1234", &n) && n == -1234);
     // Invalid values
     BOOST_CHECK(!ParseInt32("", &n));
diff --git a/src/util.cpp b/src/util.cpp
index f4800678ecb8c..a232f7663246d 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -293,7 +293,7 @@ bool LogAcceptCategory(const char* category)
                 ptrCategory.reset(new std::set<std::string>());
             }
         }
-        const std::set<std::string>& setCategories = *ptrCategory.get();
+        const std::set<std::string>& setCategories = *ptrCategory;

        // if not debugging everything and not debugging specific category, LogPrint does nothing.
         if (setCategories.count(std::string("")) == 0 &&
diff --git a/src/validation.cpp b/src/validation.cpp
index ea042a42ddbf4..bf2e337e1201a 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1909,7 +1909,10 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex,
                   CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck = false)
 {
     AssertLockHeld(cs_main);
-
+    assert(pindex);
+    // pindex->phashBlock can be null if called by CreateNewBlock/TestBlockValidity
+    assert((pindex->phashBlock == NULL) ||
+           (*pindex->phashBlock == block.GetHash()));
     int64_t nTimeStart = GetTimeMicros();

     // Check it again in case a previous version let a bad block in
@@ -3342,7 +3345,8 @@ static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidationState& state, const CChainParams& chainparams, const uint256& hash)

 bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
 {
-    const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
+    assert(pindexPrev != NULL);
+    const int nHeight = pindexPrev->nHeight + 1;

     // Check proof of work
     if(Params().NetworkIDString() == CBaseChainParams::MAIN && nHeight <= 68589){
        // architecture issues with DGW v1 and v2)
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 9074d78c13c30..dd0a25892b7c6 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -2705,11 +2705,9 @@ UniValue listunspent(const JSONRPCRequest& request)
             "      ,...\n"
             "    ]\n"
             "4. include_unsafe (bool, optional, default=true) Include outputs that are not safe to spend\n"
-            "                  because they come from unconfirmed untrusted transactions or unconfirmed\n"
-            "                  replacement transactions (cases where we are less sure that a conflicting\n"
-            "                  transaction won't be mined).\n"
-            "\nResult:\n"
-            "[ (array of json object)\n"
+            "                  See description of \"safe\" attribute below.\n"
+            "\nResult\n"
+            "[ (array of json object)\n"
             "  {\n"
             "    \"txid\" : \"txid\", (string) the transaction id \n"
             "    \"vout\" : n, (numeric) the vout value\n"
@@ -2721,6 +2719,9 @@ UniValue listunspent(const JSONRPCRequest& request)
             "    \"redeemScript\" : n (string) The redeemScript if scriptPubKey is P2SH\n"
             "    \"spendable\" : xxx, (bool) Whether we have the private keys to spend this output\n"
             "    \"solvable\" : xxx, (bool) Whether we know how to spend this output, ignoring the lack of keys\n"
+            "    \"safe\" : xxx (bool) Whether this output is considered safe to spend. Unconfirmed transactions\n"
+            "                   from outside keys and unconfirmed replacement transactions are considered unsafe\n"
+            "                   and are not eligible for spending by fundrawtransaction and sendtoaddress.\n"
             "    \"ps_rounds\" : n (numeric) The number of PS rounds\n"
             "  }\n"
             "  ,...\n"
@@ -2806,6 +2807,7 @@ UniValue listunspent(const JSONRPCRequest& request)
         entry.push_back(Pair("confirmations", out.nDepth));
         entry.push_back(Pair("spendable", out.fSpendable));
         entry.push_back(Pair("solvable", out.fSolvable));
+        entry.push_back(Pair("safe", out.fSafe));
         entry.push_back(Pair("ps_rounds", pwallet->GetCappedOutpointPrivateSendRounds(COutPoint(out.tx->GetHash(), out.i))));
         results.push_back(entry);
     }
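With the new field exposed, a caller can either filter on the reported flag itself or ask the node to exclude unsafe outputs via the fourth parameter (minconf, maxconf, addresses, include_unsafe, per the help text above). A hypothetical snippet in the qa-test style; node indices and thresholds are illustrative:

    utxos = self.nodes[0].listunspent(0)   # minconf=0 may surface unsafe outputs
    safe = [u for u in utxos if u["safe"]]
    self.log.info("%d of %d outputs are safe to spend", len(safe), len(utxos))

    # or let the node do the filtering with include_unsafe=False
    filtered = self.nodes[0].listunspent(0, 9999999, [], False)
    assert all(u["safe"] for u in filtered)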
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index d5a63b8d106e5..90157fc098bfc 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -56,7 +56,7 @@ static void add_coin(const CAmount& nValue, int nAge = 6*24, bool fIsFromMe = false, int nInput=0)
         wtx->fDebitCached = true;
         wtx->nDebitCached = 1;
     }
-    COutput output(wtx.get(), nInput, nAge, true, true);
+    COutput output(wtx.get(), nInput, nAge, true /* spendable */, true /* solvable */, true /* safe */);
     vCoins.push_back(output);
     wtxn.emplace_back(std::move(wtx));
 }
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index b91a8f7071028..1fcd043cd78d8 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -2514,7 +2514,7 @@ CAmount CWallet::GetImmatureWatchOnlyBalance() const
     return nTotal;
 }

-void CWallet::AvailableCoins(std::vector<COutput>& vCoins, bool fOnlyConfirmed, const CCoinControl *coinControl, bool fIncludeZeroValue, AvailableCoinsType nCoinType, bool fUseInstantSend) const
+void CWallet::AvailableCoins(std::vector<COutput>& vCoins, bool fOnlySafe, const CCoinControl *coinControl, bool fIncludeZeroValue, AvailableCoinsType nCoinType, bool fUseInstantSend) const
 {
     vCoins.clear();

@@ -2530,9 +2530,6 @@ void CWallet::AvailableCoins(std::vector<COutput>& vCoins, bool fOnlySafe, const CCoinControl *coinControl, bool fIncludeZeroValue, AvailableCoinsType nCoinType, bool fUseInstantSend) const
             if (!CheckFinalTx(*pcoin))
                 continue;

-            if (fOnlyConfirmed && !pcoin->IsTrusted())
-                continue;
-
             if (pcoin->IsCoinBase() && pcoin->GetBlocksToMaturity() > 0)
                 continue;

@@ -2546,6 +2543,12 @@ void CWallet::AvailableCoins(std::vector<COutput>& vCoins, bool fOnlySafe, const CCoinControl *coinControl, bool fIncludeZeroValue, AvailableCoinsType nCoinType, bool fUseInstantSend) const
             if (nDepth == 0 && !pcoin->InMempool())
                 continue;

+            bool safeTx = pcoin->IsTrusted();
+
+            if (fOnlySafe && !safeTx) {
+                continue;
+            }
+
             for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++) {
                 bool found = false;
                 if(nCoinType == ONLY_DENOMINATED) {
@@ -2570,7 +2573,7 @@ void CWallet::AvailableCoins(std::vector<COutput>& vCoins, bool fOnlySafe, const CCoinControl *coinControl, bool fIncludeZeroValue, AvailableCoinsType nCoinType, bool fUseInstantSend) const
                     vCoins.push_back(COutput(pcoin, i, nDepth,
                                              ((mine & ISMINE_SPENDABLE) != ISMINE_NO) ||
                                               (coinControl && coinControl->fAllowWatchOnly && (mine & ISMINE_WATCH_SOLVABLE) != ISMINE_NO),
-                                             (mine & (ISMINE_SPENDABLE | ISMINE_WATCH_SOLVABLE)) != ISMINE_NO));
+                                             (mine & (ISMINE_SPENDABLE | ISMINE_WATCH_SOLVABLE)) != ISMINE_NO, safeTx));
             }
         }
     }
@@ -3229,7 +3232,7 @@ int CWallet::CountInputsWithAmount(CAmount nInputAmount)
             int nDepth = pcoin->GetDepthInMainChain();

             for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++) {
-                COutput out = COutput(pcoin, i, nDepth, true, true);
+                COutput out = COutput(pcoin, i, nDepth, true, true, false);
                 COutPoint outpoint = COutPoint(out.tx->GetHash(), out.i);

                 if(out.tx->tx->vout[out.i].nValue != nInputAmount) continue;
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index e929288228953..c8979d602888f 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -530,12 +530,23 @@ class COutput
     const CWalletTx *tx;
     int i;
     int nDepth;
+
+    /** Whether we have the private keys to spend this output */
     bool fSpendable;
+
+    /** Whether we know how to spend this output, ignoring the lack of keys */
     bool fSolvable;

-    COutput(const CWalletTx *txIn, int iIn, int nDepthIn, bool fSpendableIn, bool fSolvableIn)
+    /**
+     * Whether this output is considered safe to spend. Unconfirmed transactions
+     * from outside keys and unconfirmed replacement transactions are considered
+     * unsafe and will not be used to fund new spending transactions.
+     */
+    bool fSafe;
+
+    COutput(const CWalletTx *txIn, int iIn, int nDepthIn, bool fSpendableIn, bool fSolvableIn, bool fSafeIn)
     {
-        tx = txIn; i = iIn; nDepth = nDepthIn; fSpendable = fSpendableIn; fSolvable = fSolvableIn;
+        tx = txIn; i = iIn; nDepth = nDepthIn; fSpendable = fSpendableIn; fSolvable = fSolvableIn; fSafe = fSafeIn;
     }

     //Used with Darksend. Will return largest nondenom, then denominations, then very small inputs
@@ -829,7 +840,7 @@ class CWallet : public CCryptoKeyStore, public CValidationInterface
     /**
     * populate vCoins with vector of available COutputs.
     */
-    void AvailableCoins(std::vector<COutput>& vCoins, bool fOnlyConfirmed=true, const CCoinControl *coinControl = NULL, bool fIncludeZeroValue=false, AvailableCoinsType nCoinType=ALL_COINS, bool fUseInstantSend = false) const;
+    void AvailableCoins(std::vector<COutput>& vCoins, bool fOnlySafe=true, const CCoinControl *coinControl = NULL, bool fIncludeZeroValue=false, AvailableCoinsType nCoinType=ALL_COINS, bool fUseInstantSend = false) const;

     /**
     * Shuffle and select coins until nTargetValue is reached while avoiding