#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.

Setup: two nodes, node0 + node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.

We have one NodeConn connection to node0 called test_node, and one to node1
called min_work_node.

The test:
1. Generate one block on each node, to leave IBD.

2. Mine a new block on each tip, and deliver to each node from node's peer.
   The tip should advance for node0, but node1 should skip processing due to
   nMinimumChainWork.

Node1 is unused in tests 3-7:

3. Mine a block that forks from the genesis block, and deliver to test_node.
   Node0 should not process this block (just accept the header), because it
   is unrequested and doesn't have at least as much work as the tip.

4a,b. Send another two blocks that build on the forking block.
   Node0 should process the second block but be stuck on the shorter chain,
   because it's missing an intermediate block.

4c. Send 288 more blocks on the longer chain (the number of blocks ahead
   we currently store).
   Node0 should process all but the last block (too far ahead in height).

5. Send a duplicate of the block in #3 to Node0.
   Node0 should not process the block because it is unrequested, and stay on
   the shorter chain.

6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is still missing the fork block from #3 and
   send a getdata for it.

7. Send Node0 the missing block again.
   Node0 should process it and the tip should advance.

8. Create a fork which is invalid at a height longer than the current chain
   (i.e. to which the node will try to reorg) but which has headers built on top
   of the invalid block. Check that we get disconnected if we send more headers
   on the chain the node now knows to be invalid.

9. Test that Node1 is able to sync when connected to node0 (which should have
   sufficient work on its chain).
"""

from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction

class AcceptBlockTest(BitcoinTestFramework):
    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "bitcoind"),
                          help="bitcoind binary to test")

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [[], ["-minimumchainwork=0x10"]]
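        # -minimumchainwork=0x10 keeps node1 from processing unrequested blocks
        # on low-work chains: on regtest each minimum-difficulty block contributes
        # roughly 2 units of chainwork, so the short chains used early in this
        # test stay below the 0x10 threshold.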

    def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used to test
        # the interaction with nMinimumChainWork.
        self.setup_nodes()

    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = NodeConnCB()      # connects to node0
        min_work_node = NodeConnCB()  # connects to node1

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], min_work_node))
        test_node.add_connection(connections[0])
        min_work_node.add_connection(connections[1])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        min_work_node.wait_for_verack()

        # 1. Have nodes mine a block (leave IBD)
        [n.generate(1) for n in self.nodes]
        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]

        # 2. Send one block that builds on each tip.
        # This should be accepted by node0
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        min_work_node.send_message(msg_block(blocks_h2[1]))

        for x in [test_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)
        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
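
        # Optional sanity check (a sketch, assuming getblockchaininfo reports the
        # active chain's cumulative work as a hex string): node1's chainwork is
        # still below the -minimumchainwork=0x10 threshold, which is why it did
        # not process the unrequested block above.
        assert int(self.nodes[1].getblockchaininfo()['chainwork'], 16) < 0x10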

        # 3. Send another block that builds on genesis.
        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
        block_time += 1
        block_h1f.solve()
        test_node.send_message(msg_block(block_h1f))

        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
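
        # The fork block's header is known (it shows up in getchaintips as
        # "headers-only"), but the block data itself was not stored because the
        # block was unrequested and does not have at least as much work as the tip.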

        # 4. Send another two blocks that build on the fork.
        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
        block_time += 1
        block_h2f.solve()
        test_node.send_message(msg_block(block_h2f))

        test_node.sync_with_ping()
        # Since the earlier block was not processed by the node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)

        # But this block should be accepted by the node since it has equal work.
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")

        # 4b. Now send another block that builds on the forking chain.
        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
        block_h3.solve()
        test_node.send_message(msg_block(block_h3))

        test_node.sync_with_ping()
        # Since the earlier block was not processed by the node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)

        # But this block should be accepted by the node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")

        # 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node (as long as it's not missing any headers)
        tip = block_h3
        all_blocks = []
        for i in range(288):
            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block
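
        # 288 mirrors the limit described in the module docstring: the number of
        # blocks ahead of the tip whose data the node will keep around, so only
        # the final block in this batch should be rejected as too far ahead.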

        # Now send the block at height 5 and check that it wasn't accepted (missing header)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)

        # The block at height 5 should be accepted if we provide the missing header, though
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        self.nodes[0].getblock(all_blocks[1].hash)
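
        # Once the parent header (all_blocks[0], height 4) is known, the height 5
        # block connects to a known header chain and is stored even though it was
        # never requested.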

        # Now send the blocks in all_blocks
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        test_node.sync_with_ping()

        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)

        # 5. Re-send the fork block from #3 (block_h1f), which the node did not
        # process. It should still not be processed because it is unrequested,
        # even though it now has a child with more work.

        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first
        connections[0].disconnect_node()
        test_node.wait_for_disconnect()

        test_node = NodeConnCB()  # connects to node (not whitelisted)
        connections[0] = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
        test_node.add_connection(connections[0])

        test_node.wait_for_verack()
        test_node.send_message(msg_block(block_h1f))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")

        # 6. Try to get the node to request the missing block.
        # Poke the node with an inv for the block at height 3 and see if that
        # triggers a getdata for the missing fork block, block_h1f (it should,
        # since that is the only block on this chain the node doesn't have).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(block_h1f))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")

        # 8. Create a chain which is invalid at a height longer than the
        # current chain, but which has more blocks on top of that
        block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
        block_289f.solve()
        block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
        block_290f.solve()
        block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
        # block_291 spends a coinbase below maturity!
        block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
        block_291.hashMerkleRoot = block_291.calc_merkle_root()
        block_291.solve()
        block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
        block_292.solve()
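
        # The replacement chain builds on all_blocks[284] (height 288) and reaches
        # height 292, so it has more work than the current height-290 tip and the
        # node will try to reorg to it. block_291 is consensus-invalid because it
        # spends block_290f's coinbase long before the 100-block maturity
        # requirement, so the reorg must fail at that point.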

        # Now send all the headers on the chain and enough blocks to trigger reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_289f))
        headers_message.headers.append(CBlockHeader(block_290f))
        headers_message.headers.append(CBlockHeader(block_291))
        headers_message.headers.append(CBlockHeader(block_292))
        test_node.send_message(headers_message)

        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_292.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)

        test_node.send_message(msg_block(block_289f))
        test_node.send_message(msg_block(block_290f))

        test_node.sync_with_ping()
        self.nodes[0].getblock(block_289f.hash)
        self.nodes[0].getblock(block_290f.hash)

        test_node.send_message(msg_block(block_291))

        # At this point we've sent an obviously-bogus block, wait for full processing
        # without assuming whether we will be disconnected or not
        try:
            # Only wait a short while so the test doesn't take forever if we do get
            # disconnected
            test_node.sync_with_ping(timeout=1)
        except AssertionError:
            test_node.wait_for_disconnect()

        test_node = NodeConnCB()  # connects to node (not whitelisted)
        connections[0] = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
        test_node.add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread
        test_node.wait_for_verack()

        # We should have failed reorg and switched back to 290 (but have block 291)
        assert_equal(self.nodes[0].getblockcount(), 290)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
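
        # A confirmations value of -1 from getblock means block_291 is known to
        # the node but is not on the active chain, consistent with the failed
        # reorg above.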

        # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
        block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
        block_293.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_293))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()
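
        # The node has already marked block_291's chain as invalid, so a peer
        # that keeps announcing headers building on that chain is treated as
        # being on a bad fork and gets disconnected (docstring step 8).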

        # 9. Connect node1 to node0 and ensure it is able to sync
        connect_nodes(self.nodes[0], 1)
        sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")

        [c.disconnect_node() for c in connections]

if __name__ == '__main__':
    AcceptBlockTest().main()