# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.

WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
import os

MIN_BLOCKS_TO_KEEP = 288

# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
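# For example, manual_test() below prunes "up to" a given block by passing that block's
# header time plus this window to the pruneblockchain RPC, compensating for the same
# 2 hour buffer the RPC applies when mapping a timestamp back to a block height:
#   node.pruneblockchain(node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW)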

def calc_usage(blockdir):
    return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
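
# Note: calc_usage() reports the combined size, in MiB, of the files directly in the
# block directory (the blk*.dat and rev*.dat files); the test compares this figure
# against the 550MB prune target configured via -prune=550.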

class PruneTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 6

        # Create nodes 0 and 1 to mine.
        # Create node 2 to test pruning.
        self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000"]
        # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
        # Create node 5 to test wallet in prune mode, but do not connect
        self.extra_args = [self.full_node_default_args,
                           self.full_node_default_args,
                           ["-maxreceivebuffer=20000", "-prune=550"],
                           ["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
                           ["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
                           ["-prune=550"]]

    def setup_network(self):
        self.setup_nodes()

        self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/"

        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[0], 4)
        sync_blocks(self.nodes[0:5])

    def create_big_chain(self):
        # Start by creating some coinbases we can spend later
        self.nodes[1].generate(200)
        sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)
        # Then mine enough full blocks to create more than 550MiB of data
        for i in range(645):
            mine_large_block(self.nodes[0], self.utxo_cache_0)

        sync_blocks(self.nodes[0:5])

    def test_height_min(self):
        if not os.path.isfile(self.prunedir + "blk00000.dat"):
            raise AssertionError("blk00000.dat is missing, pruning too early")
        self.log.info("Success")
        self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
        self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk; 20 full blocks past the height cutoff will ensure this
        for i in range(25):
            mine_large_block(self.nodes[0], self.utxo_cache_0)

        waitstart = time.time()
        while os.path.isfile(self.prunedir + "blk00000.dat"):
            time.sleep(0.1)
            if time.time() - waitstart > 30:
                raise AssertionError("blk00000.dat not pruned when it should be")

        self.log.info("Success")
        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
        if usage > 550:
            raise AssertionError("Pruning target not being met")

    def create_chain_with_staleblocks(self):
        # Create stale blocks in manageable sized chunks
        self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")

        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorgs when node 0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have node 1's transactions to accidentally mine
            self.stop_node(0)
            self.nodes[0] = self.start_node(0, self.options.tmpdir, self.full_node_default_args, timewait=900)
            # Mine 24 blocks in node 1
            for i in range(24):
                if j == 0:
                    mine_large_block(self.nodes[1], self.utxo_cache_1)
                else:
                    # Add node1's wallet transactions back to the mempool, to
                    # avoid the mined blocks from being too small.
                    self.nodes[1].resendwallettransactions()
                    self.nodes[1].generate(1)  # tx's already in mempool from previous disconnects

            # Reorg back with 25 block chain from node 0
            for i in range(25):
                mine_large_block(self.nodes[0], self.utxo_cache_0)

            # Create connections in the order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[1], 0)
            connect_nodes(self.nodes[2], 0)
            sync_blocks(self.nodes[0:3])

        self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))

    def reorg_test(self):
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        # Reboot node 1 to clear its mempool (hopefully making the invalidate faster)
        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
        self.stop_node(1)
        self.nodes[1] = self.start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        height = self.nodes[1].getblockcount()
        self.log.info("Current block height: %d" % height)

        invalidheight = height - 287
        badhash = self.nodes[1].getblockhash(invalidheight)
        self.log.info("Invalidating block %s at height %d" % (badhash, invalidheight))
        self.nodes[1].invalidateblock(badhash)

        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want.
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)

        assert(self.nodes[1].getblockcount() == invalidheight - 1)
        self.log.info("New best height: %d" % self.nodes[1].getblockcount())

        # Reboot node 1 to clear those giant tx's from the mempool
        self.stop_node(1)
        self.nodes[1] = self.start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        self.log.info("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)

        self.log.info("Reconnect nodes")
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3], timeout=120)

        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))

        self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")

        # Get node0's wallet transactions back in its mempool, to avoid the
        # mined blocks from being too small.
        self.nodes[0].resendwallettransactions()

        for i in range(22):
            # This can be slow, so do this in multiple RPC calls to avoid
            # RPC timeouts.
            self.nodes[0].generate(10)  # node 0 has many large tx's in its mempool from the disconnects
        sync_blocks(self.nodes[0:3], timeout=300)

        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
        if usage > 550:
            raise AssertionError("Pruning target not being met")

        return invalidheight, badhash

    def reorg_back(self):
        # Verify that a block on the old main chain fork has been pruned away
        assert_raises_jsonrpc(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
        self.log.info("Will need to redownload block %d" % self.forkheight)

        # Verify that we have enough history to reorg back to the fork point.
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it's expected to still be retained.
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))

        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2

        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
        # redownload its missing blocks.
        # Invalidate the reorg_test chain in node 0 as well; it can successfully switch to the original chain
        # because it has all the block data.
        # However it must mine enough blocks to have a more-work chain than the reorg_test chain in order
        # to trigger node 2's block download logic.
        # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg.
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
            self.nodes[0].invalidateblock(curchainhash)
            assert(self.nodes[0].getblockcount() == self.mainchainheight)
            assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
            goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
            goalbestheight = first_reorg_height + 1

        self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
        waitstart = time.time()
        while self.nodes[2].getblockcount() < goalbestheight:
            time.sleep(0.1)
            if time.time() - waitstart > 900:
                raise AssertionError("Node 2 didn't reorg to proper height")
        assert(self.nodes[2].getbestblockhash() == goalbesthash)
        # Verify we now have the data for a block that was previously pruned
        assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)

    def manual_test(self, node_number, use_timestamp):
        # at this point, node has 995 blocks and has not yet run in prune mode
        node = self.nodes[node_number] = self.start_node(node_number, self.options.tmpdir, timewait=900)
        assert_equal(node.getblockcount(), 995)
        assert_raises_jsonrpc(-1, "not in prune mode", node.pruneblockchain, 500)
        self.stop_node(node_number)

        # now re-start in manual pruning mode
        node = self.nodes[node_number] = self.start_node(node_number, self.options.tmpdir, ["-prune=1"], timewait=900)
        assert_equal(node.getblockcount(), 995)

        def height(index):
            if use_timestamp:
                return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
            else:
                return index

        def prune(index, expected_ret=None):
            ret = node.pruneblockchain(height(index))
            # Check the return value. When use_timestamp is True, just check
            # that the return value is less than or equal to the expected
            # value, because when more than one block is generated per second,
            # a timestamp will not be granular enough to uniquely identify an
            # individual block.
            if expected_ret is None:
                expected_ret = index
            if use_timestamp:
                assert_greater_than(ret, 0)
                assert_greater_than(expected_ret + 1, ret)
            else:
                assert_equal(ret, expected_ret)

        def has_block(index):
            return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))
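
        # Pruning works at block-file granularity: the pruneblockchain RPC deletes whole
        # blk*.dat files (and their undo data) once every block they contain falls below
        # the requested target, so the checks below assert on the presence or absence of
        # specific block files rather than on individual blocks.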

        # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
        assert_raises_jsonrpc(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))

        # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
        node.generate(6)
        assert_equal(node.getblockchaininfo()["blocks"], 1001)

        # negative heights should raise an exception
        assert_raises_jsonrpc(-8, "Negative", node.pruneblockchain, -10)

        # height=100 too low to prune first block file so this is a no-op
        prune(100)
        if not has_block(0):
            raise AssertionError("blk00000.dat is missing when should still be there")

        # Does nothing
        node.pruneblockchain(height(0))
        if not has_block(0):
            raise AssertionError("blk00000.dat is missing when should still be there")

        # height=500 should prune first file
        prune(500)
        if has_block(0):
            raise AssertionError("blk00000.dat is still there, should be pruned by now")
        if not has_block(1):
            raise AssertionError("blk00001.dat is missing when should still be there")

        # height=650 should prune second file
        prune(650)
        if has_block(1):
            raise AssertionError("blk00001.dat is still there, should be pruned by now")

        # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
        prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
        if not has_block(2):
            raise AssertionError("blk00002.dat is missing when should still be there")

        # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
        node.generate(288)
        prune(1000)
        if has_block(2):
            raise AssertionError("blk00002.dat is still there, should be pruned by now")
        if has_block(3):
            raise AssertionError("blk00003.dat is still there, should be pruned by now")

        # stop node, start back up with auto-prune at 550MB, make sure still runs
        self.stop_node(node_number)
        self.nodes[node_number] = self.start_node(node_number, self.options.tmpdir, ["-prune=550"], timewait=900)

        self.log.info("Success")

    def wallet_test(self):
        # check that the pruning node's wallet is still in good shape
        self.log.info("Stop and start pruning node to trigger wallet rescan")
        self.stop_node(2)
        self.nodes[2] = self.start_node(2, self.options.tmpdir, ["-prune=550"])
        self.log.info("Success")

        # check that wallet loads successfully when restarting a pruned node after IBD.
        # this was reported to fail in #7494.
        self.log.info("Syncing node 5 to test wallet")
        connect_nodes(self.nodes[0], 5)
        nds = [self.nodes[0], self.nodes[5]]
        sync_blocks(nds, wait=5, timeout=300)
        self.stop_node(5)  # stop and start to trigger rescan
        self.nodes[5] = self.start_node(5, self.options.tmpdir, ["-prune=550"])
        self.log.info("Success")

    def run_test(self):
        self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
        self.log.info("Mining a big blockchain of 995 blocks")

        # Determine default relay fee
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]

        # Cache for utxos, as listunspent may take a long time later in the test
        self.utxo_cache_0 = []
        self.utxo_cache_1 = []

        self.create_big_chain()
        # Chain diagram key:
        # *       blocks on main chain
        # +,&,$,@ blocks on other forks
        # X       invalidated block
        # N1      Node 1
        #
        # Start by mining a simple chain that all nodes have
        # N0=N1=N2 **...*(995)

        # stop manual-pruning nodes with 995 blocks
        self.stop_node(3)
        self.stop_node(4)

        self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
        self.test_height_min()
        # Extend this chain past the PruneAfterHeight
        # N0=N1=N2 **...*(1020)

        self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
        self.create_chain_with_staleblocks()
        # Disconnect N0
        # And mine a 24 block chain on N1 and a separate 25 block chain on N0
        # N1=N2 **...*+...+(1044)
        # N0    **...**...**(1045)
        #
        # reconnect nodes causing reorg on N1 and N2
        # N1=N2 **...*(1020) *...**(1045)
        #                    +...+(1044)
        #
        # repeat this process until you have 12 stale forks hanging off the
        # main chain on N1 and N2
        # N0    *************************...***************************(1320)
        #
        # N1=N2 **...*(1020) *...**(1045) *..         ..**(1295) *...**(1320)
        #                    +...+(1044)  &..                    $...$(1319)

        # Save some current chain state for later use
        self.mainchainheight = self.nodes[2].getblockcount()  # 1320
        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)

        self.log.info("Check that we can survive a 288 block reorg still")
        (self.forkheight, self.forkhash) = self.reorg_test()  # (1033, )
        # Now create a 288 block reorg by mining a longer chain on N1
        # First disconnect N1
        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
        # N1    **...*(1020) **...**(1032)X..
        #
        # Now mine 300 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@(1332)
        #
        # Reconnect nodes and mine 220 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@@@(1552)
        #
        # N2    **...*(1020) **...**(1032) @@...@@@(1552)
        #
        # N0    ********************(1032) @@...@@@(1552)

        self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
        self.reorg_back()
        # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
        # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
        # original main chain (*), but will require redownload of some blocks
        # In order to have a peer we think we can download from, must also perform this invalidation
        # on N0 and mine a new longest chain to trigger.
        # N0    ********************(1032) **...****(1553)
        #
        # N2    **...*(1020) **...**(1032) **...****(1553)
        #
        # N1 doesn't change because 1033 on main chain (*) is invalid

        self.log.info("Test manual pruning with block indices")
        self.manual_test(3, use_timestamp=False)

        self.log.info("Test manual pruning with timestamps")
        self.manual_test(4, use_timestamp=True)

        self.log.info("Test wallet re-scan")
        self.wallet_test()

        self.log.info("Done")

if __name__ == '__main__':
    PruneTest().main()