From 914bd78770445bee6c42b26a3355d89f0c66349d Mon Sep 17 00:00:00 2001
From: MarcoFalke
Date: Fri, 10 Mar 2017 23:04:58 +0100
Subject: [PATCH] Merge #9972: Fix extended rpc tests broken by #9768

d055bd6 Fix extended rpc tests broken by 8910b4717e5bb946ee6988f7fe9fd461f53a5935 (John Newbery)

Tree-SHA512: 30e019bd7e17013143e419777491ac0efd9760dddac7232d320c9afe80bc2fb383acd331e20cd5b3e21df1177c0e4a5221c99f831e90cf869d3affca206b9728
---
 qa/rpc-tests/maxblocksinflight.py |  2 ++
 qa/rpc-tests/pruning.py           | 22 +++++++++++-----------
 qa/rpc-tests/smartfees.py         |  7 ++++++-
 3 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/qa/rpc-tests/maxblocksinflight.py b/qa/rpc-tests/maxblocksinflight.py
index 6a1c016d0e6f2..9eaa254649fd9 100755
--- a/qa/rpc-tests/maxblocksinflight.py
+++ b/qa/rpc-tests/maxblocksinflight.py
@@ -87,6 +87,8 @@ def setup_network(self):
 
     def run_test(self):
         test = TestManager()
+        # pass log handler through to the test manager object
+        test.log = self.log
         test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
         NetworkThread().start() # Start up network handling in another thread
         test.run()
diff --git a/qa/rpc-tests/pruning.py b/qa/rpc-tests/pruning.py
index c6a82b5feaa74..cc84c8c085728 100755
--- a/qa/rpc-tests/pruning.py
+++ b/qa/rpc-tests/pruning.py
@@ -80,7 +80,7 @@ def test_height_min(self):
         if not os.path.isfile(self.prunedir+"blk00000.dat"):
             raise AssertionError("blk00000.dat is missing, pruning too early")
         self.log.info("Success")
-        self.log.info("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir))
+        self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
         self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
         # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
         for i in range(25):
@@ -94,7 +94,7 @@ def test_height_min(self):
 
         self.log.info("Success")
         usage = calc_usage(self.prunedir)
-        self.log.info("Usage should be below target:", usage)
+        self.log.info("Usage should be below target: %d" % usage)
         if (usage > 550):
             raise AssertionError("Pruning target not being met")
 
@@ -124,7 +124,7 @@ def create_chain_with_staleblocks(self):
             connect_nodes(self.nodes[2], 0)
             sync_blocks(self.nodes[0:3])
 
-        self.log.info("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))
+        self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
 
     def reorg_test(self):
         # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
@@ -135,11 +135,11 @@ def reorg_test(self):
         self.nodes[1]=start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
 
         height = self.nodes[1].getblockcount()
-        self.log.info("Current block height:", height)
+        self.log.info("Current block height: %d" % height)
 
         invalidheight = height-287
         badhash = self.nodes[1].getblockhash(invalidheight)
-        self.log.info("Invalidating block at height:",invalidheight,badhash)
+        self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight))
         self.nodes[1].invalidateblock(badhash)
 
         # We've now switched to our previously mined-24 block fork on node 1, but thats not what we want
@@ -151,7 +151,7 @@ def reorg_test(self):
         # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
         mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
         curhash = self.nodes[1].getblockhash(invalidheight - 1)
         assert(self.nodes[1].getblockcount() == invalidheight - 1)
-        self.log.info("New best height", self.nodes[1].getblockcount())
+        self.log.info("New best height: %d" % self.nodes[1].getblockcount())
         # Reboot node1 to clear those giant tx's from mempool
         self.stop_node(1)
@@ -165,8 +165,8 @@ def reorg_test(self):
         connect_nodes(self.nodes[2], 1)
         sync_blocks(self.nodes[0:3], timeout=120)
 
-        self.log.info("Verify height on node 2:",self.nodes[2].getblockcount())
-        self.log.info("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir))
+        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
+        self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
 
         self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
         for i in range(22):
@@ -176,7 +176,7 @@ def reorg_test(self):
         sync_blocks(self.nodes[0:3], timeout=300)
 
         usage = calc_usage(self.prunedir)
-        self.log.info("Usage should be below target:", usage)
+        self.log.info("Usage should be below target: %d" % usage)
         if (usage > 550):
             raise AssertionError("Pruning target not being met")
 
@@ -185,7 +185,7 @@ def reorg_back(self):
 
         # Verify that a block on the old main chain fork has been pruned away
         assert_raises_jsonrpc(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
-        self.log.info("Will need to redownload block",self.forkheight)
+        self.log.info("Will need to redownload block %d" % self.forkheight)
 
         # Verify that we have enough history to reorg back to the fork point
         # Although this is more than 288 blocks, because this chain was written more recently
@@ -209,7 +209,7 @@ def reorg_back(self):
         # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
         if self.nodes[2].getblockcount() < self.mainchainheight:
             blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
-            self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine)
+            self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
             self.nodes[0].invalidateblock(curchainhash)
             assert(self.nodes[0].getblockcount() == self.mainchainheight)
             assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
diff --git a/qa/rpc-tests/smartfees.py b/qa/rpc-tests/smartfees.py
index 9035ea210a640..37a1c2ec4a449 100644
--- a/qa/rpc-tests/smartfees.py
+++ b/qa/rpc-tests/smartfees.py
@@ -17,6 +17,8 @@
 # 4 bytes of OP_TRUE and push 2-byte redeem script of "OP_1 OP_DROP" or "OP_2 OP_DROP"
 SCRIPT_SIG = ["0451025175", "0451025275"]
 
+global log
+
 class DecimalEncoder(json.JSONEncoder):
     def default(self, o):
         if isinstance(o, Decimal):
@@ -120,7 +122,7 @@ def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
     """
     all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
     if print_estimates:
-        self.log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
+        log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
     delta = 1.0e-6 # account for rounding error
     last_e = max(fees_seen)
     for e in [x for x in all_estimates if x >= 0]:
@@ -257,6 +259,9 @@ def transact_and_mine(self, numblocks, mining_node):
         self.memutxo = newmem
 
     def run_test(self):
+        # Make log handler available to helper functions
+        global log
+        log = self.log
         self.fees_per_kb = []
         self.memutxo = []
         self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting