#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.

* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time

from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *

class TestNode(P2PInterface):
    def __init__(self):
        super().__init__()
        self.block_receive_map = defaultdict(int)

    def on_inv(self, message):
        pass

    def on_block(self, message):
        message.block.calc_sha256()
        self.block_receive_map[message.block.sha256] += 1

class MaxUploadTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
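        # Note: -maxuploadtarget is specified in MiB per 24h; the byte budget
        # computed in run_test below (800*1024*1024 per day) matches this.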
        self.extra_args = [["-maxuploadtarget=800", "-blockmaxsize=999000"]]

        # Cache for utxos, as the listunspent may take a long time later in the test
        self.utxo_cache = []

    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2*60*60*24*7)
        self.nodes[0].setmocktime(old_time)

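        # Mining 130 blocks leaves coinbase outputs past the 100-block maturity
        # window, which fund the large blocks created later in the test.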
        # Generate some old blocks
        self.nodes[0].generate(130)

        # p2p_conns[0] will only request old blocks
        # p2p_conns[1] will only request new blocks
        # p2p_conns[2] will test resetting the counters
        p2p_conns = []

        for _ in range(3):
            p2p_conns.append(self.nodes[0].add_p2p_connection(TestNode()))

        network_thread_start()
        for p2pc in p2p_conns:
            p2pc.wait_for_verack()

        # Test logic begins here

        # Now mine a big block
        mine_large_block(self.nodes[0], self.utxo_cache)

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)

        # Mine one more block, so that the prior block looks old
        mine_large_block(self.nodes[0], self.utxo_cache)

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # p2p_conns[0] will test what happens if we just keep requesting the
        # same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        max_bytes_per_day = 800*1024*1024
        daily_buffer = 144 * 4000000
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # 576MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~235 tries.
        for i in range(success_count):
            p2p_conns[0].send_message(getdata_request)
            p2p_conns[0].sync_with_ping()
            assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            p2p_conns[0].send_message(getdata_request)
        p2p_conns[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info("Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on p2p_conns[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 800 times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(800):
            p2p_conns[1].send_message(getdata_request)
            p2p_conns[1].sync_with_ping()
            assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)

        self.log.info("Peer 1 able to repeatedly download new block")

        # But if p2p_conns[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        p2p_conns[1].send_message(getdata_request)
        p2p_conns[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer 1 disconnected after trying to download old block")

        self.log.info("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and p2p_conns[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        p2p_conns[2].sync_with_ping()
        p2p_conns[2].send_message(getdata_request)
        p2p_conns[2].sync_with_ping()
        assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)

        self.log.info("Peer 2 able to download old block")

        self.nodes[0].disconnect_p2ps()

        # Stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        self.log.info("Restarting nodes with -whitelist=127.0.0.1")
        self.stop_node(0)
        self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])

        # Reconnect to self.nodes[0]
        self.nodes[0].add_p2p_connection(TestNode())

        network_thread_start()
        self.nodes[0].p2p.wait_for_verack()

        # Retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            self.nodes[0].p2p.send_message(getdata_request)
            self.nodes[0].p2p.sync_with_ping()
            assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1)

        getdata_request.inv = [CInv(2, big_old_block)]
        self.nodes[0].p2p.send_and_ping(getdata_request)
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)  # node is still connected because of the whitelist

        self.log.info("Peer still connected after trying to download old block (whitelisted)")

if __name__ == '__main__':
    MaxUploadTest().main()